diff --git a/.circleci/config.yml b/.circleci/config.yml index eb267dffd7fb..8c2b443f1e84 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,7 +9,7 @@ _defaults: &defaults docker: # CircleCI maintains a library of pre-built images # documented at https://circleci.com/developer/images/image/cimg/python - - image: cimg/python:3.11.8 + - image: cimg/python:3.11.10 working_directory: ~/repo @@ -54,29 +54,22 @@ jobs: command: | python3.11 -m venv venv . venv/bin/activate - pip install --progress-bar=off -r requirements/test_requirements.txt + pip install --progress-bar=off -r requirements/test_requirements.txt \ + -r requirements/build_requirements.txt \ + -r requirements/ci_requirements.txt # get newer, pre-release versions of critical packages - pip install --progress-bar=off --pre --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple -r requirements/doc_requirements.txt + pip install --progress-bar=off --pre -r requirements/doc_requirements.txt # then install numpy HEAD, which will override the version installed above - pip install . --config-settings=setup-args="-Dallow-noblas=true" - - - run: - name: create release notes - command: | - . venv/bin/activate - VERSION=$(pip show numpy | grep Version: | cut -d ' ' -f 2 | cut -c 1-5) - towncrier build --version $VERSION --yes - ./tools/ci/test_all_newsfragments_used.py + spin build --with-scipy-openblas=64 -j 2 - run: name: build devdocs w/ref warnings command: | . venv/bin/activate - cd doc # Don't use -q, show warning summary" - SPHINXOPTS="-W -n" make -e html - if [[ $(find build/html -type f | wc -l) -lt 1000 ]]; then - echo "doc build failed: build/html is empty" + SPHINXOPTS="-W -n" spin docs + if [[ $(find doc/build/html -type f | wc -l) -lt 1000 ]]; then + echo "doc build failed: doc/build/html is empty" exit -1 fi @@ -95,10 +88,17 @@ jobs: # destination: neps - run: - name: run refguide-check + name: check doctests command: | . venv/bin/activate - python tools/refguide_check.py -v + spin check-docs -v + spin check-tutorials -v + # Currently, this does two checks not done by check-docs: + # - validates ReST blocks (via validate_rst_syntax) + # - checks that all of a module's `__all__` is reflected in the + # module-level docstring autosummary + echo calling python3 tools/refguide_check.py -v + python3 tools/refguide_check.py -v - persist_to_workspace: root: ~/repo diff --git a/.editorconfig b/.editorconfig index 5fdaee55c25d..99b30c52b07f 100644 --- a/.editorconfig +++ b/.editorconfig @@ -6,3 +6,21 @@ indent_size = 4 indent_style = space max_line_length = 80 trim_trailing_whitespace = true + +[*.{py,pyi,pxd}] +# https://peps.python.org/pep-0008/ +charset = utf-8 +end_of_line = lf +indent_size = 4 +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +[*.py] +# Keep in sync with `tools/lint_diff.ini` and `tools/linter.py` +# https://pycodestyle.pycqa.org/en/latest/intro.html#configuration +max_line_length = 88 + +[*.pyi] +# https://typing.readthedocs.io/en/latest/guides/writing_stubs.html#style-guide +max_line_length = 130 diff --git a/.github/ISSUE_TEMPLATE/typing.yml b/.github/ISSUE_TEMPLATE/typing.yml index a35b339e4883..17eedfae1c6c 100644 --- a/.github/ISSUE_TEMPLATE/typing.yml +++ b/.github/ISSUE_TEMPLATE/typing.yml @@ -1,7 +1,7 @@ name: Static Typing description: Report an issue with the NumPy typing hints. 
title: "TYP: " -labels: [Static typing] +labels: [41 - Static typing] body: - type: markdown diff --git a/.github/pr-prefix-labeler.yml b/.github/pr-prefix-labeler.yml index 4905b502045d..65ed35aa1a11 100644 --- a/.github/pr-prefix-labeler.yml +++ b/.github/pr-prefix-labeler.yml @@ -12,5 +12,5 @@ "REV": "34 - Reversion" "STY": "03 - Maintenance" "TST": "05 - Testing" -"TYP": "static typing" +"TYP": "41 - Static typing" "WIP": "25 - WIP" diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 467400d99336..e6f06f4f886d 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -45,7 +45,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@eb055d739abdc2e8de2e5f4ba1a8b246daa779aa # v3.26.0 + uses: github/codeql-action/init@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # v3.27.5 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -55,7 +55,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@eb055d739abdc2e8de2e5f4ba1a8b246daa779aa # v3.26.0 + uses: github/codeql-action/autobuild@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # v3.27.5 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -68,6 +68,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@eb055d739abdc2e8de2e5f4ba1a8b246daa779aa # v3.26.0 + uses: github/codeql-action/analyze@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # v3.27.5 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml new file mode 100644 index 000000000000..9477e0be1bd1 --- /dev/null +++ b/.github/workflows/compiler_sanitizers.yml @@ -0,0 +1,127 @@ +name: Test with compiler sanitizers + +on: + push: + branches: + - main + pull_request: + branches: + - main + - maintenance/** + +defaults: + run: + shell: bash + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +permissions: + contents: read # to fetch code (actions/checkout) + +jobs: + clang_ASAN: + # To enable this workflow on a fork, comment out: + if: github.repository == 'numpy/numpy' + runs-on: macos-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + - name: Set up pyenv + run: | + git clone https://github.com/pyenv/pyenv.git "$HOME/.pyenv" + PYENV_ROOT="$HOME/.pyenv" + PYENV_BIN="$PYENV_ROOT/bin" + PYENV_SHIMS="$PYENV_ROOT/shims" + echo "$PYENV_BIN" >> $GITHUB_PATH + echo "$PYENV_SHIMS" >> $GITHUB_PATH + echo "PYENV_ROOT=$PYENV_ROOT" >> $GITHUB_ENV + - name: Check pyenv is working + run: + pyenv --version + - name: Set up LLVM + run: | + brew install llvm@19 + LLVM_PREFIX=$(brew --prefix llvm@19) + echo CC="$LLVM_PREFIX/bin/clang" >> $GITHUB_ENV + echo CXX="$LLVM_PREFIX/bin/clang++" >> $GITHUB_ENV + echo LDFLAGS="-L$LLVM_PREFIX/lib" >> $GITHUB_ENV + echo CPPFLAGS="-I$LLVM_PREFIX/include" >> $GITHUB_ENV + - name: Build Python with address sanitizer + run: | + CONFIGURE_OPTS="--with-address-sanitizer" pyenv 
install 3.13 + pyenv global 3.13 + - name: Install dependencies + run: | + pip install -r requirements/build_requirements.txt + pip install -r requirements/ci_requirements.txt + pip install -r requirements/test_requirements.txt + # xdist captures stdout/stderr, but we want the ASAN output + pip uninstall -y pytest-xdist + - name: Build + run: + python -m spin build -j2 -- -Db_sanitize=address + - name: Test + run: | + # pass -s to pytest to see ASAN errors and warnings, otherwise pytest captures them + ASAN_OPTIONS=detect_leaks=0:symbolize=1:strict_init_order=true:allocator_may_return_null=1:halt_on_error=1 \ + python -m spin test -- -v -s --timeout=600 --durations=10 + + clang_TSAN: + # To enable this workflow on a fork, comment out: + if: github.repository == 'numpy/numpy' + runs-on: macos-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + - name: Set up pyenv + run: | + git clone https://github.com/pyenv/pyenv.git "$HOME/.pyenv" + PYENV_ROOT="$HOME/.pyenv" + PYENV_BIN="$PYENV_ROOT/bin" + PYENV_SHIMS="$PYENV_ROOT/shims" + echo "$PYENV_BIN" >> $GITHUB_PATH + echo "$PYENV_SHIMS" >> $GITHUB_PATH + echo "PYENV_ROOT=$PYENV_ROOT" >> $GITHUB_ENV + - name: Check pyenv is working + run: + pyenv --version + - name: Set up LLVM + run: | + brew install llvm@19 + LLVM_PREFIX=$(brew --prefix llvm@19) + echo CC="$LLVM_PREFIX/bin/clang" >> $GITHUB_ENV + echo CXX="$LLVM_PREFIX/bin/clang++" >> $GITHUB_ENV + echo LDFLAGS="-L$LLVM_PREFIX/lib" >> $GITHUB_ENV + echo CPPFLAGS="-I$LLVM_PREFIX/include" >> $GITHUB_ENV + - name: Build Python with thread sanitizer support + run: | + # free-threaded Python is much more likely to trigger races + CONFIGURE_OPTS="--with-thread-sanitizer" pyenv install 3.13t + pyenv global 3.13t + - name: Install dependencies + run: | + # TODO: remove when a released cython supports free-threaded python + pip install -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple cython + pip install -r requirements/build_requirements.txt + pip install -r requirements/ci_requirements.txt + pip install -r requirements/test_requirements.txt + # xdist captures stdout/stderr, but we want the TSAN output + pip uninstall -y pytest-xdist + - name: Build + run: + python -m spin build -j2 -- -Db_sanitize=thread + - name: Test + run: | + # These tests are slow, so only run tests in files that do "import threading" to make them count + TSAN_OPTIONS=allocator_may_return_null=1:halt_on_error=1 \ + python -m spin test \ + `find numpy -name "test*.py" | xargs grep -l "import threading" | tr '\n' ' '` \ + -- -v -s --timeout=600 --durations=10 diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index adf2c4442a9e..bfb7e6ad841f 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -62,7 +62,7 @@ jobs: cd tools /usr/bin/python3.9 -m pytest --pyargs numpy -n2 -m "not slow" - name: Upload wheel if tests fail - uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 if: failure() with: name: numpy-cygwin-wheel diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 461ef2b4253b..ded315744bd7 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -17,4 +17,4 @@ jobs: - name: 'Checkout Repository' uses: 
actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: 'Dependency Review' - uses: actions/dependency-review-action@5a2ce3f5b92ee19cbb1541a4984c76d921601d7c # v4.3.4 + uses: actions/dependency-review-action@3b139cfc5fae8b618d3eae3675e383bb1769c019 # v4.5.0 diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 276592e1840f..cf94d8cf5800 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -6,10 +6,7 @@ on: - main - maintenance/** # Note: this workflow gets triggered on the same schedule as the - # wheels.yml workflow, with the exception that this workflow runs - # the test suite for the Pyodide wheel too, prior to uploading it. - # - # Run on schedule to upload to Anaconda.org + # wheels.yml workflow to upload WASM wheels to Anaconda.org. schedule: # ┌───────────── minute (0 - 59) # │ ┌───────────── hour (0 - 23) @@ -35,96 +32,53 @@ concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true -permissions: - contents: read # to fetch code (actions/checkout) jobs: build-wasm-emscripten: + permissions: + contents: read # to fetch code (actions/checkout) name: Build NumPy distribution for Pyodide runs-on: ubuntu-22.04 # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' - env: - PYODIDE_VERSION: 0.26.0 - # PYTHON_VERSION and EMSCRIPTEN_VERSION are determined by PYODIDE_VERSION. - # The appropriate versions can be found in the Pyodide repodata.json - # "info" field, or in Makefile.envs: - # https://github.com/pyodide/pyodide/blob/main/Makefile.envs#L2 - PYTHON_VERSION: 3.12.1 - EMSCRIPTEN_VERSION: 3.1.58 - NODE_VERSION: 18 steps: - name: Checkout NumPy - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: submodules: recursive - # This input shall fetch tags without the need to fetch the - # entire VCS history, see https://github.com/actions/checkout#usage fetch-tags: true - - name: Set up Python ${{ env.PYTHON_VERSION }} - id: setup-python - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 - with: - python-version: ${{ env.PYTHON_VERSION }} + - uses: pypa/cibuildwheel@7940a4c0e76eb2030e473a5f864f291f63ee879b # v2.21.3 + env: + CIBW_PLATFORM: pyodide - - name: Set up Emscripten toolchain - uses: mymindstorm/setup-emsdk@6ab9eb1bda2574c4ddb79809fc9247783eaf9021 # v14 + - name: Upload wheel artifact(s) + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: - version: ${{ env.EMSCRIPTEN_VERSION }} - actions-cache-folder: emsdk-cache - - - name: Install pyodide-build - run: pip install pyodide-build==${{ env.PYODIDE_VERSION }} - - - name: Find installation for pyodide-build - shell: python - run: | - import os - import pyodide_build - from pathlib import Path - - pyodide_build_path = Path(pyodide_build.__file__).parent - - env_file = os.getenv('GITHUB_ENV') - - with open(env_file, "a") as myfile: - myfile.write(f"PYODIDE_BUILD_PATH={pyodide_build_path}\n") - - - name: Build NumPy for Pyodide - run: | - pyodide build \ - -Cbuild-dir=build \ - -Csetup-args="--cross-file=$PWD/tools/ci/emscripten/emscripten.meson.cross" \ - -Csetup-args="-Dblas=none" \ - -Csetup-args="-Dlapack=none" - - - name: Set up Node.js - uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 + name: cp312-pyodide_wasm32 + path: ./wheelhouse/*.whl + if-no-files-found: error + + # 
Push to https://anaconda.org/scientific-python-nightly-wheels/numpy + # WARNING: this job will overwrite any existing WASM wheels. + upload-wheels: + name: Upload NumPy WASM wheels to Anaconda.org + runs-on: ubuntu-22.04 + permissions: {} + needs: [build-wasm-emscripten] + if: >- + (github.repository == 'numpy/numpy') && + (github.event_name == 'workflow_dispatch' && github.event.inputs.push_wheels == 'true') || + (github.event_name == 'schedule') + steps: + - name: Download wheel artifact(s) + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 with: - node-version: ${{ env.NODE_VERSION }} - - - name: Set up Pyodide virtual environment - run: | - pyodide venv .venv-pyodide - source .venv-pyodide/bin/activate - pip install dist/*.whl - pip install -r requirements/emscripten_test_requirements.txt - - - name: Test NumPy for Pyodide - run: | - source .venv-pyodide/bin/activate - cd .. - pytest --pyargs numpy -m "not slow" + path: wheelhouse/ + merge-multiple: true - # Push to https://anaconda.org/scientific-python-nightly-wheels/numpy - # WARNING: this job will overwrite any existing WASM wheels. - name: Push to Anaconda PyPI index - if: >- - (github.repository == 'numpy/numpy') && - (github.event_name == 'workflow_dispatch' && github.event.inputs.push_wheels == 'true') || - (github.event_name == 'schedule') - uses: scientific-python/upload-nightly-action@b67d7fcc0396e1128a474d1ab2b48aa94680f9fc # v0.5.0 + uses: scientific-python/upload-nightly-action@82396a2ed4269ba06c6b2988bb4fd568ef3c3d6b # v0.6.1 with: - artifacts_path: dist/ + artifacts_path: wheelhouse/ anaconda_nightly_upload_token: ${{ secrets.NUMPY_NIGHTLY_UPLOAD_TOKEN }} diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 2e63c7494c54..b4826f2e1642 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -37,7 +37,7 @@ jobs: with: submodules: recursive fetch-depth: 0 - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: '3.10' - name: Install linter requirements @@ -55,15 +55,20 @@ jobs: MESON_ARGS: "-Dallow-noblas=true -Dcpu-baseline=none -Dcpu-dispatch=none" strategy: matrix: - version: ["3.10", "3.11", "3.12", "3.13-dev"] + version: ["3.10", "3.11", "3.12", "3.13", "3.13t"] steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: quansight-labs/setup-python@b9ab292c751a42bcd2bb465b7fa202ea2c3f5796 # v5.3.1 with: python-version: ${{ matrix.version }} + # TODO: remove cython nightly install when cython does a release + - name: Install nightly Cython + if: matrix.version == '3.13t' + run: | + pip install -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple cython - uses: ./.github/meson_actions pypy: @@ -75,9 +80,9 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: - python-version: 'pypy3.10-v7.3.15' + python-version: 'pypy3.10-v7.3.17' - name: Setup using scipy-openblas run: | python -m pip install -r requirements/ci_requirements.txt @@ -122,7 +127,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - 
uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: '3.10' - name: Install build and test dependencies from PyPI @@ -147,6 +152,8 @@ jobs: run: | pytest numpy --cov-report=html:build/coverage # TODO: gcov + env: + PYTHONOPTIMIZE: 2 benchmark: needs: [smoke_test] @@ -157,7 +164,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: '3.10' - name: Install build and benchmarking dependencies @@ -181,7 +188,7 @@ jobs: - name: Check docstests shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' run: | - pip install scipy-doctest hypothesis matplotlib scipy pytz pandas + pip install scipy-doctest==1.5.1 hypothesis==6.104.1 matplotlib scipy pytz pandas spin check-docs -v spin check-tutorials -v @@ -194,7 +201,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: '3.11' - name: Install gfortran and setup OpenBLAS (sdist build) @@ -236,7 +243,7 @@ jobs: submodules: 'true' path: 'array-api-tests' - name: Set up Python - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: '3.11' - name: Install build and test dependencies from PyPI @@ -264,7 +271,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: '3.11' - name: Install build and test dependencies from PyPI @@ -274,7 +281,7 @@ jobs: pip install vulture - name: Build and install NumPy run: | - # Install using the fastests way to build (no BLAS, no SIMD) + # Install using the fastest way to build (no BLAS, no SIMD) spin build -j2 -- -Dallow-noblas=true -Dcpu-baseline=none -Dcpu-dispatch=none - name: Check build-internal dependencies run: | @@ -291,23 +298,3 @@ jobs: rm -rf build-install ./vendored-meson/meson/meson.py install -C build --destdir ../build-install --tags=runtime,python-runtime,devel python tools/check_installed_files.py $(find ./build-install -path '*/site-packages/numpy') --no-tests - - free-threaded: - needs: [smoke_test] - runs-on: ubuntu-latest - if: github.event_name != 'push' - steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - submodules: recursive - fetch-tags: true - # TODO: replace with setup-python when there is support - - uses: deadsnakes/action@6c8b9b82fe0b4344f4b98f2775fcc395df45e494 # v3.1.0 - with: - python-version: '3.13-dev' - nogil: true - # TODO: remove cython nightly install when cython does a release - - name: Install nightly Cython - run: | - pip install -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple cython - - uses: ./.github/meson_actions diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index e3d032ee25d4..1b8121a16254 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -69,7 +69,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: 
actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: '3.11' @@ -111,9 +111,6 @@ jobs: shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' env: TERM: xterm-256color - # TODO: remove when scipy-openblas nightly tests aren't failing anymore. - # xref gh-26824 - continue-on-error: true run: | pip install pytest pytest-xdist hypothesis typing_extensions spin test -j auto @@ -198,7 +195,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: '3.11' @@ -226,7 +223,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: '3.11' @@ -286,7 +283,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: '3.11' @@ -349,7 +346,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: '3.11' @@ -385,7 +382,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: '3.11' diff --git a/.github/workflows/linux_compiler_sanitizers.yml b/.github/workflows/linux_compiler_sanitizers.yml index efa8eb980730..0f685d1f2ac7 100644 --- a/.github/workflows/linux_compiler_sanitizers.yml +++ b/.github/workflows/linux_compiler_sanitizers.yml @@ -30,7 +30,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: ${{ env.PYTHON_VERSION }} - name: Install dependencies diff --git a/.github/workflows/linux_musl.yml b/.github/workflows/linux_musl.yml index ee33632c2343..18a6a5eefe4a 100644 --- a/.github/workflows/linux_musl.yml +++ b/.github/workflows/linux_musl.yml @@ -24,7 +24,7 @@ jobs: container: # Use container used for building musllinux wheels # it has git installed, all the pythons, etc - image: quay.io/pypa/musllinux_1_1_x86_64 + image: quay.io/pypa/musllinux_1_2_x86_64 steps: - name: setup diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index d4d6fe4a4989..d773152bb1bb 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -44,22 +44,25 @@ jobs: # test_unary_spurious_fpexception is currently skipped # FIXME(@seiko2plus): Requires confirmation for the following issue: # The presence of an FP invalid exception caused by sqrt. Unsure if this is a qemu bug or not. 
- "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_unary_spurious_fpexception" - ] + "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_unary_spurious_fpexception", + "arm" + ] - [ "ppc64le", "powerpc64le-linux-gnu", "ppc64le/ubuntu:22.04", "-Dallow-noblas=true", "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", - ] + "ppc64le" + ] - [ "ppc64le - baseline(Power9)", "powerpc64le-linux-gnu", "ppc64le/ubuntu:22.04", "-Dallow-noblas=true -Dcpu-baseline=vsx3", "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", - ] + "ppc64le" + ] - [ "s390x", "s390x-linux-gnu", @@ -68,27 +71,31 @@ jobs: # Skipping TestRationalFunctions.test_gcd_overflow test # because of a possible qemu bug that appears to be related to int64 overflow in absolute operation. # TODO(@seiko2plus): Confirm the bug and provide a minimal reproducer, then report it to upstream. - "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_gcd_overflow" - ] + "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_gcd_overflow", + "s390x" + ] - [ "s390x - baseline(Z13)", "s390x-linux-gnu", "s390x/ubuntu:22.04", "-Dallow-noblas=true -Dcpu-baseline=vx", - "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_gcd_overflow" - ] + "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_gcd_overflow", + "s390x" + ] - [ "riscv64", "riscv64-linux-gnu", "riscv64/ubuntu:22.04", "-Dallow-noblas=true", - "test_kind or test_multiarray or test_simd or test_umath or test_ufunc" - ] + "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", + "riscv64" + ] env: TOOLCHAIN_NAME: ${{ matrix.BUILD_PROP[1] }} DOCKER_CONTAINER: ${{ matrix.BUILD_PROP[2] }} MESON_OPTIONS: ${{ matrix.BUILD_PROP[3] }} RUNTIME_TEST_FILTER: ${{ matrix.BUILD_PROP[4] }} + ARCH: ${{ matrix.BUILD_PROP[5] }} TERM: xterm-256color name: "${{ matrix.BUILD_PROP[0] }}" @@ -108,7 +115,7 @@ jobs: sudo apt install -y ninja-build gcc-${TOOLCHAIN_NAME} g++-${TOOLCHAIN_NAME} gfortran-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@v4.0.2 + uses: actions/cache@v4.2.0 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} @@ -117,7 +124,8 @@ jobs: - name: Creates new container if: steps.container-cache.outputs.cache-hit != 'true' run: | - docker run --name the_container --interactive -v /:/host -v $(pwd):/numpy ${DOCKER_CONTAINER} /bin/bash -c " + docker run --platform=linux/${ARCH} --name the_container --interactive \ + -v /:/host -v $(pwd):/numpy ${DOCKER_CONTAINER} /bin/bash -c " apt update && apt install -y cmake git python3 python-is-python3 python3-dev python3-pip && mkdir -p /lib64 && ln -s /host/lib64/ld-* /lib64/ && @@ -133,7 +141,9 @@ jobs: rm -f /usr/bin/ld.bfd && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-ld.bfd /usr/bin/ld.bfd && rm -f /usr/bin/ninja && ln -s /host/usr/bin/ninja /usr/bin/ninja && git config --global --add safe.directory /numpy && - python -m pip install -r /numpy/requirements/build_requirements.txt && + # No need to build ninja from source, the host ninja is used for the build + grep -v ninja /numpy/requirements/build_requirements.txt > /tmp/build_requirements.txt && + python -m pip install -r /tmp/build_requirements.txt && python -m pip install pytest pytest-xdist hypothesis typing_extensions && rm -f /usr/local/bin/ninja && mkdir -p /usr/local/bin && ln -s /host/usr/bin/ninja 
/usr/local/bin/ninja " @@ -147,10 +157,11 @@ jobs: - name: Meson Build run: | - docker run --rm -e "TERM=xterm-256color" -v $(pwd):/numpy -v /:/host the_container \ - /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy && spin build --clean -- ${MESON_OPTIONS} - '" + docker run --rm --platform=linux/${ARCH} -e "TERM=xterm-256color" \ + -v $(pwd):/numpy -v /:/host the_container \ + /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' + cd /numpy && spin build --clean -- ${MESON_OPTIONS} + '" - name: Meson Log if: always() @@ -158,9 +169,11 @@ jobs: - name: Run Tests run: | - docker run --rm -e "TERM=xterm-256color" -v $(pwd):/numpy -v /:/host the_container \ - /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' - export F90=/usr/bin/gfortran - cd /numpy && spin test -- -k \"${RUNTIME_TEST_FILTER}\" - '" + docker run --rm --platform=linux/${ARCH} -e "TERM=xterm-256color" \ + -v $(pwd):/numpy -v /:/host the_container \ + /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' + export F90=/usr/bin/gfortran + cd /numpy && spin test -- -k \"${RUNTIME_TEST_FILTER}\" + '" + diff --git a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index a19ae38502ba..cff04bfe724a 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -17,7 +17,7 @@ name: Linux SIMD tests # # - native: # Tests against the host CPU features set as the baseline without enabling any runtime dispatched features. -# Intended to assess the entire NumPy codebase against host flags, even for code sections lacking handwritten SIMD intrincis. +# Intended to assess the entire NumPy codebase against host flags, even for code sections lacking handwritten SIMD intrinsics. # # - without_avx512/avx2/fma3: # Uses runtime SIMD dispatching but disables AVX2, FMA3, and AVX512. 
@@ -62,7 +62,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: '3.10' - uses: ./.github/meson_actions @@ -79,7 +79,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: '3.10' @@ -144,7 +144,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: "${{ matrix.BUILD_PROP[2] }}" - uses: ./.github/meson_actions @@ -158,7 +158,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: '3.11' @@ -208,7 +208,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: '3.11' diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 164a4c6710c2..d40ef9f60f20 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -1,4 +1,4 @@ -name: macOS tests (meson) +name: macOS tests on: pull_request: @@ -25,7 +25,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.11"] + python-version: ["3.12"] steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -44,7 +44,7 @@ jobs: echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT - name: Setup compiler cache - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 id: cache-ccache with: path: ${{ steps.prep-ccache.outputs.dir }} @@ -52,15 +52,15 @@ jobs: restore-keys: | ${{ github.workflow }}-${{ matrix.python-version }}-ccache-macos- - - name: Setup Mambaforge - uses: conda-incubator/setup-miniconda@a4260408e20b96e80095f42ff7f1a15b27dd94ca # v3.0.4 + - name: Setup Miniforge + uses: conda-incubator/setup-miniconda@d2e6a045a86077fb6cad6f5adf368e9076ddaa8d # v3.1.0 with: python-version: ${{ matrix.python-version }} channels: conda-forge channel-priority: true activate-environment: numpy-dev use-only-tar-bz2: false - miniforge-variant: Mambaforge + miniforge-variant: Miniforge3 miniforge-version: latest use-mamba: true @@ -68,7 +68,7 @@ jobs: # ensure we re-solve once a day (since we don't lock versions). Could be # replaced by a conda-lock based approach in the future. 
- name: Cache conda environment - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 env: # Increase this value to reset cache if environment.yml has not changed CACHE_NUMBER: 1 @@ -103,7 +103,8 @@ jobs: accelerate: - name: Accelerate (LP64, ILP64) - ${{ matrix.build_runner[1] }} + name: Accelerate - ${{ matrix.build_runner[1] }} - ${{ matrix.version }} + # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' runs-on: ${{ matrix.build_runner[0] }} strategy: @@ -112,6 +113,7 @@ jobs: build_runner: - [ macos-13, "macos_x86_64" ] - [ macos-14, "macos_arm64" ] + version: ["3.10", "3.13t"] steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -119,15 +121,21 @@ jobs: submodules: recursive fetch-tags: true - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: quansight-labs/setup-python@b9ab292c751a42bcd2bb465b7fa202ea2c3f5796 # v5.3.1 with: - python-version: '3.10' + python-version: ${{ matrix.version }} - uses: maxim-lobanov/setup-xcode@60606e260d2fc5762a71e64e74b2174e8ea3c8bd # v1.6.0 if: ${{ matrix.build_runner[0] == 'macos-13' }} with: xcode-version: '14.3' + # TODO: remove cython nightly install when cython does a release + - name: Install nightly Cython + if: matrix.version == '3.13t' + run: | + pip install -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple cython + - name: Install dependencies run: | pip install -r requirements/build_requirements.txt diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 726e6b839051..058a6b6a4275 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -47,19 +47,22 @@ jobs: matrix: os_python: - [ubuntu-latest, '3.12'] - - [windows-2019, '3.11'] - - [macos-12, '3.10'] + - [windows-latest, '3.11'] + - [macos-latest, '3.10'] steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: ${{ matrix.os_python[1] }} - name: Install dependencies run: | pip install -r requirements/build_requirements.txt + # orjson makes mypy faster but the default requirements.txt + # can't install it because orjson doesn't support 32 bit Linux + pip install orjson pip install -r requirements/test_requirements.txt - name: Build run: | diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index d9577fae45ac..31241637244b 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -42,7 +42,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable # uploads of run results in SARIF format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: SARIF file path: results.sarif @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@eb055d739abdc2e8de2e5f4ba1a8b246daa779aa # v2.1.27 + uses: github/codeql-action/upload-sarif@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # v2.1.27 with: sarif_file: results.sarif diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index ce034d24d2ea..2bf5b7ce0e52 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -76,8 +76,8 @@ jobs: # Github Actions doesn't support pairing matrix values together, let's improvise # https://github.com/github/feedback/discussions/7835#discussioncomment-1769026 buildplat: - - [ubuntu-20.04, manylinux_x86_64, ""] - - [ubuntu-20.04, musllinux_x86_64, ""] + - [ubuntu-22.04, manylinux_x86_64, ""] + - [ubuntu-22.04, musllinux_x86_64, ""] - [macos-13, macosx_x86_64, openblas] # targeting macos >= 14. Could probably build on macos-14, but it would be a cross-compile @@ -90,14 +90,10 @@ jobs: # Don't build PyPy 32-bit windows - buildplat: [windows-2019, win32, ""] python: "pp310" - - buildplat: [ ubuntu-20.04, musllinux_x86_64, "" ] + - buildplat: [ ubuntu-22.04, musllinux_x86_64, "" ] python: "pp310" - buildplat: [ macos-14, macosx_arm64, accelerate ] python: "pp310" - - buildplat: [ windows-2019, win_amd64, "" ] - python: "cp313t" - - buildplat: [ windows-2019, win32, "" ] - python: "cp313t" - buildplat: [ macos13, macosx_x86_64, openblas ] python: "cp313t" @@ -130,7 +126,7 @@ jobs: if: runner.os == 'windows' # Used to push the built wheels - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: python-version: "3.x" @@ -162,22 +158,23 @@ jobs: - name: Set up free-threaded build if: matrix.python == 'cp313t' + shell: bash -el {0} run: | echo "CIBW_BUILD_FRONTEND=pip; args: --no-build-isolation" >> "$GITHUB_ENV" - name: Build wheels - uses: pypa/cibuildwheel@bd033a44476646b606efccdd5eed92d5ea1d77ad # v2.20.0 + uses: pypa/cibuildwheel@7940a4c0e76eb2030e473a5f864f291f63ee879b # v2.21.3 env: CIBW_PRERELEASE_PYTHONS: True CIBW_FREE_THREADED_SUPPORT: True CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - - uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 + - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl - - uses: mamba-org/setup-micromamba@f8b8a1e23a26f60a44c853292711bacfd3eac822 + - uses: mamba-org/setup-micromamba@ab6bf8bf7403e8023a094abeec19d6753bdc143e with: # for installation of anaconda-client, required for upload to # anaconda.org @@ -231,7 +228,7 @@ jobs: with: submodules: true # Used to push the built wheels - - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: # Build sdist on lowest supported Python python-version: "3.10" @@ -253,12 +250,12 @@ jobs: python -mpip install twine twine check dist/* - - uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 + - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: sdist path: ./dist/* - - uses: conda-incubator/setup-miniconda@a4260408e20b96e80095f42ff7f1a15b27dd94ca # v3.0.4 + - uses: conda-incubator/setup-miniconda@d2e6a045a86077fb6cad6f5adf368e9076ddaa8d # v3.1.0 with: # for installation of anaconda-client, required for 
upload to
           # anaconda.org
diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml
index 0ecf4be83628..541e8fd77ab5 100644
--- a/.github/workflows/windows.yml
+++ b/.github/workflows/windows.yml
@@ -31,7 +31,7 @@ jobs:
         fetch-tags: true

     - name: Setup Python
-      uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1
+      uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0
       with:
         python-version: '3.11'

@@ -94,7 +94,7 @@ jobs:
         fetch-tags: true

     - name: Setup Python (32-bit)
-      uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1
+      uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0
       with:
         python-version: '3.10'
         architecture: 'x86'
diff --git a/.github/workflows/windows_arm64.yml b/.github/workflows/windows_arm64.yml
new file mode 100644
index 000000000000..0f9a22389049
--- /dev/null
+++ b/.github/workflows/windows_arm64.yml
@@ -0,0 +1,207 @@
+name: Windows Arm64
+
+on:
+  workflow_dispatch:
+
+env:
+  python_version: 3.12
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+permissions:
+  contents: read # to fetch code (actions/checkout)
+
+jobs:
+  windows_arm:
+    runs-on: windows-2019
+
+    # To enable this job on a fork, comment out:
+    if: github.repository == 'numpy/numpy'
+    steps:
+    - name: Checkout
+      uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+      with:
+        submodules: recursive
+        fetch-tags: true
+
+    - name: Setup Python
+      uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0
+      with:
+        python-version: ${{env.python_version}}
+        architecture: x64
+
+    - name: Install build dependencies from PyPI
+      run: |
+        python -m pip install -r requirements/build_requirements.txt
+
+    - name: Prepare python
+      shell: powershell
+      run: |
+        $ErrorActionPreference = "Stop"
+
+        #Detecting python location and version
+        $PythonDir = (Split-Path -Parent (get-command python).Path)
+        $PythonVersionParts = ( -split (python -V))
+        $PythonVersion = $PythonVersionParts[1]
+
+        #Downloading the package for appropriate python version from nuget
+        $PythonARM64NugetLink = "https://www.nuget.org/api/v2/package/pythonarm64/$PythonVersion"
+        $PythonARM64NugetZip = "nuget_python.zip"
+        $PythonARM64NugetDir = "temp_nuget"
+        Invoke-WebRequest $PythonARM64NugetLink -OutFile $PythonARM64NugetZip
+
+        #Changing the libs folder to enable python libraries to be linked for arm64
+        Expand-Archive $PythonARM64NugetZip $PythonARM64NugetDir
+        Copy-Item $PythonARM64NugetDir\tools\libs\* $PythonDir\libs
+        Remove-Item -Force -Recurse $PythonARM64NugetDir
+        Remove-Item -Force $PythonARM64NugetZip
+
+        if ((Test-Path -LiteralPath variable:\LASTEXITCODE)) { exit $LASTEXITCODE }
+
+    - name: Prepare Licence
+      shell: powershell
+      run: |
+        $ErrorActionPreference = "Stop"
+
+        $CurrentDir = (get-location).Path
+        $LicenseFile = "$CurrentDir\LICENSE.txt"
+        Set-Content $LicenseFile ([Environment]::NewLine)
+        Add-Content $LicenseFile "----"
+        Add-Content $LicenseFile ([Environment]::NewLine)
+        Add-Content $LicenseFile (Get-Content "$CurrentDir\LICENSES_bundled.txt")
+        Add-Content $LicenseFile (Get-Content "$CurrentDir\tools\wheels\LICENSE_win32.txt")
+
+        if ((Test-Path -LiteralPath variable:\LASTEXITCODE)) { exit $LASTEXITCODE }
+
+    - name: Wheel build
+      shell: powershell
+      run: |
+        $ErrorActionPreference = "Stop"
+
+        #Creating cross compile script for meson subsystem
+        $CurrentDir = (get-location)
+        $CrossScript = "$CurrentDir\arm64_w64.txt"
+        
$CrossScriptContent = + { + [host_machine] + system = 'windows' + subsystem = 'windows' + kernel = 'nt' + cpu_family = 'aarch64' + cpu = 'aarch64' + endian = 'little' + + [binaries] + c='cl.exe' + cpp = 'cl.exe' + + [properties] + sizeof_short = 2 + sizeof_int = 4 + sizeof_long = 4 + sizeof_long_long = 8 + sizeof_float = 4 + sizeof_double = 8 + sizeof_long_double = 8 + sizeof_size_t = 8 + sizeof_wchar_t = 2 + sizeof_off_t = 4 + sizeof_Py_intptr_t = 8 + sizeof_PY_LONG_LONG = 8 + longdouble_format = 'IEEE_DOUBLE_LE' + } + Set-Content $CrossScript $CrossScriptContent.ToString() + + #Setting up cross compilers from MSVC + $Products = 'Community', 'Professional', 'Enterprise', 'BuildTools' | % { "Microsoft.VisualStudio.Product.$_" } + $VsInstallPath = (vswhere -products $Products -latest -format json | ConvertFrom-Json).installationPath + $VSVars = (Get-ChildItem -Path $VsInstallPath -Recurse -Filter "vcvarsamd64_arm64.bat").FullName + $ScriptingObj = New-Object -ComObject Scripting.FileSystemObject + $VSVarsShort = $ScriptingObj.GetFile($VSVars).ShortPath + cmd /c "$VSVarsShort && set" | + ForEach-Object { + if ($_ -match "=") { + $Var = $_.split("=") + set-item -force -path "ENV:\$($Var[0])" -value "$($Var[1])" + } + } + + #Building the wheel + pip wheel . --config-settings=setup-args="--cross-file=$CrossScript" + + if ((Test-Path -LiteralPath variable:\LASTEXITCODE)) { exit $LASTEXITCODE } + + - name: Fix wheel + shell: powershell + run: | + $ErrorActionPreference = "Stop" + + #Finding whl file + $CurrentDir = (get-location) + $WhlName = ((Get-ChildItem -Filter "*.whl").FullName) + $ZipWhlName = "$CurrentDir\ZipWhlName.zip" + $UnzippedWhl = "$CurrentDir\unzipedWhl" + + #Expanding whl file + Rename-Item -Path $WhlName $ZipWhlName + if (Test-Path $UnzippedWhl) { + Remove-Item -Force -Recurse $UnzippedWhl + } + Expand-Archive -Force -Path $ZipWhlName $UnzippedWhl + + #Renaming all files to show that their arch is arm64 + Get-ChildItem -Recurse -Path $UnzippedWhl *win_amd64* | Rename-Item -NewName { $_.Name -replace 'win_amd64', 'win_arm64' } + $DIST_DIR = (Get-ChildItem -Recurse -Path $UnzippedWhl *dist-info).FullName + + #Changing amd64 references from metafiles + (GET-Content $DIST_DIR/RECORD) -replace 'win_amd64', 'win_arm64' | Set-Content $DIST_DIR/RECORD + (GET-Content $DIST_DIR/WHEEL) -replace 'win_amd64', 'win_arm64' | Set-Content $DIST_DIR/WHEEL + + #Packing whl file + Compress-Archive -Path $UnzippedWhl\* -DestinationPath $ZipWhlName -Force + $WhlName = $WhlName.Replace("win_amd64", "win_arm64") + Rename-Item -Path $ZipWhlName $WhlName + + if ((Test-Path -LiteralPath variable:\LASTEXITCODE)) { exit $LASTEXITCODE } + + - name: Upload Artifacts + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 + with: + name: ${{ env.python_version }}-win_arm64 + path: ./*.whl + + - name: Setup Mamba + uses: mamba-org/setup-micromamba@ab6bf8bf7403e8023a094abeec19d6753bdc143e + with: + # for installation of anaconda-client, required for upload to + # anaconda.org + # Note that this step is *after* specific pythons have been used to + # build and test the wheel + # for installation of anaconda-client, for upload to anaconda.org + # environment will be activated after creation, and in future bash steps + init-shell: bash + environment-name: upload-env + create-args: >- + anaconda-client + + # - name: Upload wheels + # if: success() + # shell: bash -el {0} + # # see https://github.com/marketplace/actions/setup-miniconda for why + # # `-el {0}` is required. 
+ # env: + # NUMPY_STAGING_UPLOAD_TOKEN: ${{ secrets.NUMPY_STAGING_UPLOAD_TOKEN }} + # NUMPY_NIGHTLY_UPLOAD_TOKEN: ${{ secrets.NUMPY_NIGHTLY_UPLOAD_TOKEN }} + # run: | + # source tools/wheels/upload_wheels.sh + # set_upload_vars + # # trigger an upload to + # # https://anaconda.org/scientific-python-nightly-wheels/numpy + # # for cron jobs or "Run workflow" (restricted to main branch). + # # Tags will upload to + # # https://anaconda.org/multibuild-wheels-staging/numpy + # # The tokens were originally generated at anaconda.org + # upload_wheels + diff --git a/.mailmap b/.mailmap index 143ad1c4a9b2..1ae0bce7f11a 100644 --- a/.mailmap +++ b/.mailmap @@ -7,55 +7,65 @@ # # This file is up-to-date if the command git log --format="%aN <%aE>" | sort -u # gives no duplicates. -@8bitmp3 <19637339+8bitmp3@users.noreply.github.com> -@Algorithmist-Girl <36552319+Algorithmist-Girl@users.noreply.github.com> -@DWesl <22566757+DWesl@users.noreply.github.com> -@Endolith -@GalaxySnail -@Illviljan <14371165+Illviljan@users.noreply.github.com> -@LSchroefl <65246829+LSchroefl@users.noreply.github.com> -@Lbogula -@Lisa <34400837+lyzlisa@users.noreply.github.com> -@Patrick <39380924+xamm@users.noreply.github.com> -@Scian <65375075+hoony6134@users.noreply.github.com> -@Searchingdays -@amagicmuffin <2014wcheng@gmail.com> -@code-review-doctor -@cook-1229 <70235336+cook-1229@users.noreply.github.com> -@dg3192 <113710955+dg3192@users.noreply.github.com> -@ellaella12 -@ellaella12 <120079323+ellaella12@users.noreply.github.com> -@h-vetinari -@h6197627 <44726212+h6197627@users.noreply.github.com> -@jbCodeHub -@juztamau5 -@legoffant <58195095+legoffant@users.noreply.github.com> -@liang3zy22 <35164941+liang3zy22@users.noreply.github.com> -@luzpaz -@luzpaz -@matoro -@mcp292 -@mgunyho <20118130+mgunyho@users.noreply.github.com> -@msavinash <73682349+msavinash@users.noreply.github.com> -@mykykh <49101849+mykykh@users.noreply.github.com> -@partev -@pkubaj -@pmvz -@pojaghi <36278217+pojaghi@users.noreply.github.com> -@pratiklp00 -@sfolje0 -@spacescientist -@stefan6419846 -@stefan6419846 <96178532+stefan6419846@users.noreply.github.com> -@tajbinjohn -@tautaus -@undermyumbrella1 -@xoviat <49173759+xoviat@users.noreply.github.com> -@xoviat <49173759+xoviat@users.noreply.github.com> -@yan-wyb -@yetanothercheer +!8bitmp3 <19637339+8bitmp3@users.noreply.github.com> +!Algorithmist-Girl <36552319+Algorithmist-Girl@users.noreply.github.com> +!DWesl <22566757+DWesl@users.noreply.github.com> +!Dreamge +!Endolith +!GalaxySnail +!Illviljan <14371165+Illviljan@users.noreply.github.com> +!LSchroefl <65246829+LSchroefl@users.noreply.github.com> +!Lbogula +!Lisa <34400837+lyzlisa@users.noreply.github.com> +!Patrick <39380924+xamm@users.noreply.github.com> +!Scian <65375075+hoony6134@users.noreply.github.com> +!Searchingdays +!amagicmuffin <2014wcheng@gmail.com> +!bersbersbers <12128514+bersbersbers@users.noreply.github.com> +!code-review-doctor +!cook-1229 <70235336+cook-1229@users.noreply.github.com> +!dg3192 <113710955+dg3192@users.noreply.github.com> +!ellaella12 +!ellaella12 <120079323+ellaella12@users.noreply.github.com> +!fengluoqiuwu +!fengluoqiuwu <163119756+fengluoqiuwu@users.noreply.github.com> +!h-vetinari +!h6197627 <44726212+h6197627@users.noreply.github.com> +!hutauf +!jbCodeHub +!juztamau5 +!legoffant <58195095+legoffant@users.noreply.github.com> +!liang3zy22 <35164941+liang3zy22@users.noreply.github.com> +!luzpaz +!luzpaz +!matoro +!mcp292 +!mgunyho <20118130+mgunyho@users.noreply.github.com> +!msavinash 
<73682349+msavinash@users.noreply.github.com> +!musvaage +!mykykh <49101849+mykykh@users.noreply.github.com> +!nullSoup <34267803+nullSoup@users.noreply.github.com> +!ogidig5 <82846833+ogidig5@users.noreply.github.com> +!partev +!pkubaj +!pmvz +!pojaghi <36278217+pojaghi@users.noreply.github.com> +!pratiklp00 +!sfolje0 +!spacescientist +!stefan6419846 +!stefan6419846 <96178532+stefan6419846@users.noreply.github.com> +!tajbinjohn +!tautaus +!undermyumbrella1 +!vahidmech +!xoviat <49173759+xoviat@users.noreply.github.com> +!xoviat <49173759+xoviat@users.noreply.github.com> +!yan-wyb +!yetanothercheer Aaron Baecker Adrin Jalali +Abraham Medina Arun Kota Arun Kota Arun Kota Aarthi Agurusa @@ -64,6 +74,7 @@ Aditi Saluja <136131452+salujaditi14@users.noreply.github.com> Andrei Batomunkuev Ajay DS Ajay DS +Ajay Kumar Janapareddi Alan Fontenot Alan Fontenot <36168460+logeaux@users.noreply.github.com> Abdul Muneer @@ -117,6 +128,7 @@ Andrea Sangalli <53617841+and-sang@users.noreply.github.c Andreas Klöckner Andreas Schwab Andrei Kucharavy +Andrej Zhilenkov Andrew Lawson Anirudh Subramanian Anne Archibald @@ -127,15 +139,18 @@ Antoine Pitrou Anton Prosekin Anže Starič Arfy Slowy +Arnaud Ma Aron Ahmadia Arun Kota Arun Kota +Arun Pa Arun Palaniappen Arun Persaud Ashutosh Singh Ashutosh Singh <55102089+Ashutosh619-sudo@users.noreply.github.com> Åsmund Hjulstad Auke Wiggers +Austin Ran <504977925@qq.com> Badhri Narayanan Krishnakumar Bhavuk Kalra Bhavuk Kalra @@ -148,6 +163,7 @@ Ben Woodruff Benjamin Root Benjamin Root weathergod Bernardt Duvenhage +Benoit Prabel Bernie Gray Bertrand Lefebvre Bharat Raghunathan @@ -171,6 +187,8 @@ Bui Duc Minh <41239569+Mibu287@users.noreply.github.co Caio Agiani Carl Kleffner Carl Leake +Carlos Henrique Hermanny Moreira da Silva +Carlos Henrique Hermanny Moreira da Silva <121122527+carlosilva10260@users.noreply.github.com> Cédric Hannotier Charles Stern <62192187+cisaacstern@users.noreply.github.com> Chiara Marmo @@ -185,6 +203,7 @@ Chris Vavaliaris Christian Clauss Christopher Dahlin Christopher Hanley +Christoph Buchner Christoph Gohlke Christoph Gohlke Christoph Gohlke cgholke @@ -280,6 +299,8 @@ Gregory R. Lee Gregory R. Lee Guo Ci guoci Guo Shuai +Habiba Hye +Habiba Hye <145866308+HabibiHye@users.noreply.github.com> Hameer Abbasi Hannah Aizenman Han Genuit @@ -292,10 +313,13 @@ Hiroyuki V. Yamazaki Hugo van Kemenade Iantra Solari I-Shen Leong +Ishan Purekar Imen Rajhi Inessa Pawson Irina Maria Mocan <28827042+IrinaMaria@users.noreply.github.com> Irvin Probst +Ishan Koradia +Ishan Koradia <39583356+Ishankoradia@users.noreply.github.com> Ivan Meleshko Isabela Presedo-Floyd Ganesh Kathiresan @@ -304,6 +328,7 @@ Giannis Zapantis Guillaume Peillex Jack J. Woehr Jacob M. 
Casey +Jakob Stevens Haas <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Jaime Fernandez Jaime Fernandez Jaime Fernandez @@ -314,6 +339,8 @@ Jake VanderPlas Jakob Jakobson Jakob Jakobson <43045863+jakobjakobson13@users.noreply.github.com> James Bourbeau +James Joseph Thomas +James Joseph Thomas quotuva James Oliver <46758370+jamesoliverh@users.noreply.github.com> James Webber Jamie Macey @@ -333,31 +360,36 @@ Jérôme Richard Jessé Pires Jessi J Zhao <35235453+jessijzhao@users.noreply.github.com> -João Fontes Gonçalves -Johann Rohwer -Johann Rohwer jmrohwer -Johnathon Cusick Jhong-Ken Chen (陳仲肯) Jhong-Ken Chen (陳仲肯) <37182101+kennychenfs@users.noreply.github.com> +Johann Faouzi +Johann Rohwer +Johann Rohwer jmrohwer Johannes Hampp <42553970+euronion@users.noreply.github.com> +Johannes Kaisinger +Johannes Kaisinger Johannes Schönberger -Johann Faouzi John Darbyshire <24256554+attack68@users.noreply.github.com> <24256554+attack68@users.noreply.github.com> John Hagen John Kirkham John Kirkham +Johnathon Cusick Johnson Sun <20457146+j3soon@users.noreply.github.com> Jonas I. Liechti Jonas I. Liechti Jonas I. Liechti +Joren Hammudoglu +Jory Klaverstijn +Jory Klaverstijn <63673224+JoryKlaverstijn@users.noreply.github.com> Joseph Fox-Rabinovitz Joseph Fox-Rabinovitz Joseph Fox-Rabinovitz Joseph Martinot-Lagarde Joshua Himmens Joyce Brum -Jory Klaverstijn -Jory Klaverstijn <63673224+JoryKlaverstijn@users.noreply.github.com> +João Fontes Gonçalves +Julia Poo +Julia Poo <57632293+JuliaPoo@users.noreply.github.com> Julian Taylor Julian Taylor Julian Taylor @@ -366,11 +398,15 @@ Julien Schueller Junyan Ou Justus Magin Justus Magin +Kai Germaschewski Kai Striega Kai Striega Kasia Leszek Kasia Leszek <39829548+katleszek@users.noreply.github.com> Karan Dhir +Karel Planken <71339309+kplanken@users.noreply.github.com> +Karthik Gangula <56480632+gangula-karthik@users.noreply.github.com> +Karthik Kaiplody Keller Meier Kenny Huynh Kevin Granados @@ -381,6 +417,7 @@ Kerem Hallaç Khaled Ben Abdallah Okuda Kiko Correoso kikocorreoso Kiko Correoso kikocorreoso +Kira Prokopenko Konrad Kapp Kristoffer Pedersen Kristoffer Pedersen @@ -395,6 +432,7 @@ Lars Grüter Leona Taric Leona Taric <92495067+LeonaTaric@users.noreply.github.com> Leonardus Chen +Liangyu Zhang Licht Takeuchi Lorenzo Mammana Lillian Zha @@ -472,6 +510,8 @@ Michel Fruchart Miki Watanabe (渡邉 美希) Miles Cranmer +Milica Dančuk +Milica Dančuk love-bees <33499899+love-bees@users.noreply.github.com> Mircea Akos Bruma Mircea Akos Bruma Mitchell Faas <35742861+Mitchell-Faas@users.noreply.github.com> @@ -483,6 +523,8 @@ Mukulika Pahari <60316606+Mukulikaa@users.noreply.git Munira Alduraibi Namami Shanker Namami Shanker NamamiShanker +Nathan Goldbaum +Nathan Goldbaum Nathaniel J. 
Smith Naveen Arunachalam naveenarun Neil Girdhar @@ -504,6 +546,7 @@ Omar Ali Omid Rajaei Omid Rajaei <89868505+rajaeinet@users.noreply.github.com> Ondřej Čertík +Oscar Armas-Luy Óscar Villellas Guillén Pablo Losada Pablo Losada <48804010+TheHawz@users.noreply.github.com> @@ -514,6 +557,8 @@ Pat Miller patmiller Paul Ivanov Paul Ivanov Paul Jacobson +Paul Juma Otieno +Paul Juma Otieno <103896399+otieno-juma@users.noreply.github.com> Paul Reece Paul YS Lee Paul Pey Lian Lim @@ -522,6 +567,7 @@ Pearu Peterson Pete Peeradej Tanruangporn Peter Bell Peter J Cock +Peter Kämpf Peyton Murray Phil Elson Pierre GM @@ -584,6 +630,7 @@ Sebastian Schleehauf Serge Guelton Sergei Vorfolomeev <39548292+vorfol@users.noreply.github.com> Shuangchi He +Shaurya Barkund <64537538+Shaurya19@users.noreply.github.com> Shubham Gupta Shubham Gupta <63910248+shubham11941140@users.noreply.github.com> Shekhar Prasad Rajak @@ -597,9 +644,11 @@ Simon Gasse Simon Gasse Sista Seetaram Sista Seetaram <65669128+sistaseetaram@users.noreply.github.com> +Slava Gorloff <31761951+gorloffslava@users.noreply.github.com> Søren Rasmussen <47032123+sorenrasmussenai@users.noreply.github.com> Spencer Hill Srimukh Sripada +Stan Ulbrych <89152624+StanFromIreland@users.noreply.github.com> Stefan Behnel Stefan van der Walt Stefan van der Walt @@ -634,16 +683,21 @@ Toshiki Kataoka Travis Oliphant Travis Oliphant Travis Oliphant +Vahid Tavanashad <120411540+vtavana@users.noreply.github.com> Valentin Haenel Valentin Haenel Vardhaman Kalloli <83634399+cyai@users.noreply.github.com> Varun Nayyar +Victor Herdeiro +Vijayakumar Z Vinith Kishore Vinith Kishore <85550536+vinith2@users.noreply.github.com> Vrinda Narayan Vrinda Narayan Vrinda Narayan <48102157+vrindaaa@users.noreply.github.com> Wansoo Kim +Warrick Ball +Warrick Ball Warren Weckesser Warren Weckesser Weitang Li @@ -656,6 +710,7 @@ Xiangyi Wang Yamada Fuyuka Yang Hau Yang Hau +Yang Wang Yash Pethe Yash Pethe <83630710+patient74@users.noreply.github.com> Yashasvi Misra diff --git a/.spin/cmds.py b/.spin/cmds.py index 0773578de913..ee9fa38346a7 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -1,13 +1,11 @@ import os import shutil import pathlib -import shutil -import pathlib import importlib import subprocess import click -from spin import util +import spin from spin.cmds import meson @@ -38,8 +36,7 @@ def _get_numpy_tools(filename): "revision-range", required=True ) -@click.pass_context -def changelog(ctx, token, revision_range): +def changelog(token, revision_range): """👩 Get change log for provided revision range \b @@ -74,71 +71,20 @@ def changelog(ctx, token, revision_range): ) -@click.command() -@click.option( - "-j", "--jobs", - help="Number of parallel tasks to launch", - type=int -) -@click.option( - "--clean", is_flag=True, - help="Clean build directory before build" -) -@click.option( - "-v", "--verbose", is_flag=True, - help="Print all build output, even installation" -) @click.option( "--with-scipy-openblas", type=click.Choice(["32", "64"]), default=None, help="Build with pre-installed scipy-openblas32 or scipy-openblas64 wheel" ) -@click.argument("meson_args", nargs=-1) -@click.pass_context -def build(ctx, meson_args, with_scipy_openblas, jobs=None, clean=False, verbose=False, quiet=False, *args, **kwargs): - """🔧 Build package with Meson/ninja and install - - MESON_ARGS are passed through e.g.: - - spin build -- -Dpkg_config_path=/lib64/pkgconfig - - The package is installed to build-install - - By default builds for release, to be able to use a debugger set CFLAGS - 
appropriately. For example, for linux use - - CFLAGS="-O0 -g" spin build - """ - # XXX keep in sync with upstream build +@spin.util.extend_command(spin.cmds.meson.build) +def build(*, parent_callback, with_scipy_openblas, **kwargs): if with_scipy_openblas: _config_openblas(with_scipy_openblas) - ctx.params.pop("with_scipy_openblas", None) - ctx.forward(meson.build) + parent_callback(**kwargs) -@click.command() -@click.argument("sphinx_target", default="html") -@click.option( - "--clean", is_flag=True, - default=False, - help="Clean previously built docs before building" -) -@click.option( - "--build/--no-build", - "first_build", - default=True, - help="Build numpy before generating docs", -) -@click.option( - '--jobs', '-j', - metavar='N_JOBS', - # Avoids pydata_sphinx_theme extension warning from default="auto". - default="1", - help=("Number of parallel build jobs." - "Can be set to `auto` to use all cores.") -) -@click.pass_context -def docs(ctx, sphinx_target, clean, first_build, jobs, *args, **kwargs): +@spin.util.extend_command(spin.cmds.meson.docs) +def docs(*, parent_callback, **kwargs): """📖 Build Sphinx documentation By default, SPHINXOPTS="-W", raising errors on warnings. @@ -159,22 +105,12 @@ def docs(ctx, sphinx_target, clean, first_build, jobs, *args, **kwargs): spin docs dist """ - meson.docs.ignore_unknown_options = True - - # See https://github.com/scientific-python/spin/pull/199 - # Can be changed when spin updates to 0.11, and moved to pyproject.toml - if clean: - clean_dirs = [ - './doc/build/', - './doc/source/reference/generated', - './doc/source/reference/random/bit_generators/generated', - './doc/source/reference/random/generated', - ] - - for target_dir in clean_dirs: - if os.path.isdir(target_dir): - print(f"Removing {target_dir!r}") - shutil.rmtree(target_dir) + kwargs['clean_dirs'] = [ + './doc/build/', + './doc/source/reference/generated', + './doc/source/reference/random/bit_generators/generated', + './doc/source/reference/random/generated', + ] # Run towncrier without staging anything for commit. This is the way to get # release notes snippets included in a local doc build. @@ -184,11 +120,14 @@ def docs(ctx, sphinx_target, clean, first_build, jobs, *args, **kwargs): with open(outfile, 'w') as f: f.write(p.stdout) - ctx.forward(meson.docs) + parent_callback(**kwargs) + + +# Override default jobs to 1 +jobs_param = next(p for p in docs.params if p.name == 'jobs') +jobs_param.default = 1 -@click.command() -@click.argument("pytest_args", nargs=-1) @click.option( "-m", "markexpr", @@ -196,101 +135,25 @@ def docs(ctx, sphinx_target, clean, first_build, jobs, *args, **kwargs): default="not slow", help="Run tests with the given markers" ) -@click.option( - "-j", - "n_jobs", - metavar='N_JOBS', - default="1", - help=("Number of parallel jobs for testing. " - "Can be set to `auto` to use all cores.") -) -@click.option( - "--tests", "-t", - metavar='TESTS', - help=(""" -Which tests to run. 
Can be a module, function, class, or method: - - \b - numpy.random - numpy.random.tests.test_generator_mt19937 - numpy.random.tests.test_generator_mt19937::TestMultivariateHypergeometric - numpy.random.tests.test_generator_mt19937::TestMultivariateHypergeometric::test_edge_cases - \b -""") -) -@click.option( - '--verbose', '-v', is_flag=True, default=False -) -@click.pass_context -def test(ctx, pytest_args, markexpr, n_jobs, tests, verbose, *args, **kwargs): - """🔧 Run tests - - PYTEST_ARGS are passed through directly to pytest, e.g.: - - spin test -- --pdb - - To run tests on a directory or file: - - \b - spin test numpy/linalg - spin test numpy/linalg/tests/test_linalg.py - - To report the durations of the N slowest tests: - - spin test -- --durations=N - - To run tests that match a given pattern: - - \b - spin test -- -k "geometric" - spin test -- -k "geometric and not rgeometric" - +@spin.util.extend_command(spin.cmds.meson.test) +def test(*, parent_callback, pytest_args, tests, markexpr, **kwargs): + """ By default, spin will run `-m 'not slow'`. To run the full test suite, use `spin test -m full` - - For more, see `pytest --help`. """ # noqa: E501 if (not pytest_args) and (not tests): - pytest_args = ('numpy',) + pytest_args = ('--pyargs', 'numpy') if '-m' not in pytest_args: - if len(pytest_args) == 1 and not tests: - tests = pytest_args[0] - pytest_args = () if markexpr != "full": pytest_args = ('-m', markexpr) + pytest_args - if (n_jobs != "1") and ('-n' not in pytest_args): - pytest_args = ('-n', str(n_jobs)) + pytest_args - - if tests and not ('--pyargs' in pytest_args): - pytest_args = ('--pyargs', tests) + pytest_args - - if verbose: - pytest_args = ('-v',) + pytest_args - - ctx.params['pytest_args'] = pytest_args - - for extra_param in ('markexpr', 'n_jobs', 'tests', 'verbose'): - del ctx.params[extra_param] - ctx.forward(meson.test) + kwargs['pytest_args'] = pytest_args + parent_callback(**{'pytest_args': pytest_args, 'tests': tests, **kwargs}) -@click.command() -@click.argument("pytest_args", nargs=-1) -@click.option( - "-j", - "n_jobs", - metavar='N_JOBS', - default="1", - help=("Number of parallel jobs for testing. " - "Can be set to `auto` to use all cores.") -) -@click.option( - '--verbose', '-v', is_flag=True, default=False -) -@click.pass_context -def check_docs(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): +@spin.util.extend_command(test, doc='') +def check_docs(*, parent_callback, pytest_args, **kwargs): """🔧 Run doctests of objects in the public API. 
PYTEST_ARGS are passed through directly to pytest, e.g.: @@ -327,14 +190,9 @@ def check_docs(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): import scipy_doctest except ModuleNotFoundError as e: raise ModuleNotFoundError("scipy-doctest not installed") from e - if (not pytest_args): - pytest_args = ('numpy',) - - if (n_jobs != "1") and ('-n' not in pytest_args): - pytest_args = ('-n', str(n_jobs)) + pytest_args - if verbose: - pytest_args = ('-v',) + pytest_args + if (not pytest_args): + pytest_args = ('--pyargs', 'numpy') # turn doctesting on: doctest_args = ( @@ -344,39 +202,21 @@ def check_docs(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): pytest_args = pytest_args + doctest_args - ctx.params['pytest_args'] = pytest_args - - for extra_param in ('n_jobs', 'verbose'): - del ctx.params[extra_param] + parent_callback(**{'pytest_args': pytest_args, **kwargs}) - ctx.forward(meson.test) - -@click.command() -@click.argument("pytest_args", nargs=-1) -@click.option( - "-j", - "n_jobs", - metavar='N_JOBS', - default="1", - help=("Number of parallel jobs for testing. " - "Can be set to `auto` to use all cores.") -) -@click.option( - '--verbose', '-v', is_flag=True, default=False -) -@click.pass_context -def check_tutorials(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): +@spin.util.extend_command(test, doc='') +def check_tutorials(*, parent_callback, pytest_args, **kwargs): """🔧 Run doctests of user-facing rst tutorials. - To test all tutorials in the numpy/doc/source/user/ directory, use + To test all tutorials in the numpy doc/source/user/ directory, use spin check-tutorials To run tests on a specific RST file: \b - spin check-tutorials numpy/doc/source/user/absolute-beginners.rst + spin check-tutorials doc/source/user/absolute-beginners.rst \b Note: @@ -393,20 +233,14 @@ def check_tutorials(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): # - `spin check-tutorials path/to/rst`, and # - `spin check-tutorials path/to/rst -- --durations=3` if (not pytest_args) or all(arg.startswith('-') for arg in pytest_args): - pytest_args = ('numpy/doc/source/user',) + pytest_args + pytest_args = ('doc/source/user',) + pytest_args # make all paths relative to the numpy source folder pytest_args = tuple( - str(curdir / '..' / '..' / arg) if not arg.startswith('-') else arg + str(curdir / '..' 
/ arg) if not arg.startswith('-') else arg for arg in pytest_args ) - if (n_jobs != "1") and ('-n' not in pytest_args): - pytest_args = ('-n', str(n_jobs)) + pytest_args - - if verbose: - pytest_args = ('-v',) + pytest_args - # turn doctesting on: doctest_args = ( '--doctest-glob=*rst', @@ -414,12 +248,7 @@ def check_tutorials(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): pytest_args = pytest_args + doctest_args - ctx.params['pytest_args'] = pytest_args - - for extra_param in ('n_jobs', 'verbose'): - del ctx.params[extra_param] - - ctx.forward(meson.test) + parent_callback(**{'pytest_args': pytest_args, **kwargs}) # From scipy: benchmarks/benchmarks/common.py @@ -446,7 +275,7 @@ def _set_mem_rlimit(max_mem=None): def _commit_to_sha(commit): - p = util.run(['git', 'rev-parse', commit], output=False, echo=False) + p = spin.util.run(['git', 'rev-parse', commit], output=False, echo=False) if p.returncode != 0: raise( click.ClickException( @@ -459,10 +288,10 @@ def _commit_to_sha(commit): def _dirty_git_working_dir(): # Changes to the working directory - p0 = util.run(['git', 'diff-files', '--quiet']) + p0 = spin.util.run(['git', 'diff-files', '--quiet']) # Staged changes - p1 = util.run(['git', 'diff-index', '--quiet', '--cached', 'HEAD']) + p1 = spin.util.run(['git', 'diff-index', '--quiet', '--cached', 'HEAD']) return (p0.returncode != 0 or p1.returncode != 0) @@ -487,7 +316,7 @@ def _run_asv(cmd): except (ImportError, RuntimeError): pass - util.run(cmd, cwd='benchmarks', env=env) + spin.util.run(cmd, cwd='benchmarks', env=env) @click.command() @click.option( @@ -510,7 +339,7 @@ def lint(ctx, branch, uncommitted): Examples: \b - For lint checks of your development brach with `main` or a custom branch: + For lint checks of your development branch with `main` or a custom branch: \b $ spin lint # defaults to main @@ -558,8 +387,9 @@ def lint(ctx, branch, uncommitted): required=False, nargs=-1 ) +@meson.build_dir_option @click.pass_context -def bench(ctx, tests, compare, verbose, quick, commits): +def bench(ctx, tests, compare, verbose, quick, commits, build_dir): """🏋 Run benchmarks. 
\b @@ -611,9 +441,9 @@ def bench(ctx, tests, compare, verbose, quick, commits): ) ctx.invoke(build) - meson._set_pythonpath() + meson._set_pythonpath(build_dir) - p = util.run( + p = spin.util.run( ['python', '-c', 'import numpy as np; print(np.__version__)'], cwd='benchmarks', echo=False, @@ -647,29 +477,20 @@ def bench(ctx, tests, compare, verbose, quick, commits): _run_asv(cmd_compare) -@click.command(context_settings={ - 'ignore_unknown_options': True -}) -@click.argument("python_args", metavar='', nargs=-1) -@click.pass_context -def python(ctx, python_args, *args, **kwargs): - """🐍 Launch Python shell with PYTHONPATH set - - OPTIONS are passed through directly to Python, e.g.: - - spin python -c 'import sys; print(sys.path)' - """ +@spin.util.extend_command(meson.python) +def python(*, parent_callback, **kwargs): env = os.environ env['PYTHONWARNINGS'] = env.get('PYTHONWARNINGS', 'all') - ctx.forward(meson.python) + + parent_callback(**kwargs) @click.command(context_settings={ 'ignore_unknown_options': True }) @click.argument("ipython_args", metavar='', nargs=-1) -@click.pass_context -def ipython(ctx, ipython_args): +@meson.build_dir_option +def ipython(*, ipython_args, build_dir): """💻 Launch IPython shell with PYTHONPATH set OPTIONS are passed through directly to IPython, e.g.: @@ -679,16 +500,19 @@ def ipython(ctx, ipython_args): env = os.environ env['PYTHONWARNINGS'] = env.get('PYTHONWARNINGS', 'all') + ctx = click.get_current_context() ctx.invoke(build) - ppath = meson._set_pythonpath() + ppath = meson._set_pythonpath(build_dir) print(f'💻 Launching IPython with PYTHONPATH="{ppath}"') + + # In spin >= 0.13.1, can replace with extended command, setting `pre_import` preimport = (r"import numpy as np; " r"print(f'\nPreimported NumPy {np.__version__} as np')") - util.run(["ipython", "--ignore-cwd", - f"--TerminalIPythonApp.exec_lines={preimport}"] + - list(ipython_args)) + spin.util.run(["ipython", "--ignore-cwd", + f"--TerminalIPythonApp.exec_lines={preimport}"] + + list(ipython_args)) @click.command(context_settings={"ignore_unknown_options": True}) @@ -702,6 +526,7 @@ def mypy(ctx): ctx.params['markexpr'] = 'full' ctx.forward(test) + @click.command(context_settings={ 'ignore_unknown_options': True }) @@ -747,8 +572,7 @@ def _config_openblas(blas_variant): help="NumPy version of release", required=False ) -@click.pass_context -def notes(ctx, version_override): +def notes(version_override): """🎉 Generate release notes and validate \b @@ -763,7 +587,7 @@ def notes(ctx, version_override): \b $ spin notes """ - project_config = util.get_config() + project_config = spin.util.get_config() version = version_override or project_config['project.version'] click.secho( @@ -774,7 +598,7 @@ def notes(ctx, version_override): # Check if `towncrier` is installed if not shutil.which("towncrier"): raise click.ClickException( - f"please install `towncrier` to use this command" + "please install `towncrier` to use this command" ) click.secho( @@ -783,7 +607,7 @@ def notes(ctx, version_override): ) # towncrier build --version 2.1 --yes cmd = ["towncrier", "build", "--version", version, "--yes"] - p = util.run(cmd=cmd, sys_exit=False, output=True, encoding="utf-8") + p = spin.util.run(cmd=cmd, sys_exit=False, output=True, encoding="utf-8") if p.returncode != 0: raise click.ClickException( f"`towncrier` failed returned {p.returncode} with error `{p.stderr}`" diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst new file mode 100644 index 000000000000..6e019983a0a2 --- /dev/null +++ b/CONTRIBUTING.rst @@ -0,0 
+1,17 @@ +=============================== +NumPy's Contributing guidelines +=============================== + +Welcome to the NumPy community! We're excited to have you here. +Whether you're new to open source or experienced, your contributions +help us grow. + +Pull requests (PRs) are always welcome, but making a PR is just the +start. Please respond to comments and requests for changes to help +move the process forward. Please follow our +`Code of Conduct `__, which applies +to all interactions, including issues and PRs. + +For more, please read https://www.numpy.org/devdocs/dev/index.html + +Thank you for contributing, and happy coding! diff --git a/LICENSES_bundled.txt b/LICENSES_bundled.txt index 815c9a1dba33..b3d8aa8bed06 100644 --- a/LICENSES_bundled.txt +++ b/LICENSES_bundled.txt @@ -29,3 +29,8 @@ Name: spin Files: .spin/cmds.py License: BSD-3 For license text, see .spin/LICENSE + +Name: tempita +Files: numpy/_build_utils/tempita/* +License: MIT + For details, see numpy/_build_utils/tempita/LICENCE.txt diff --git a/benchmarks/asv_pip_nopep517.py b/benchmarks/asv_pip_nopep517.py index 085cbff1f4ee..cffc42a55c7d 100644 --- a/benchmarks/asv_pip_nopep517.py +++ b/benchmarks/asv_pip_nopep517.py @@ -1,7 +1,8 @@ """ This file is used by asv_compare.conf.json.tpl. """ -import subprocess, sys +import subprocess +import sys # pip ignores '--global-option' when pep517 is enabled therefore we disable it. cmd = [sys.executable, '-mpip', 'wheel', '--no-use-pep517'] try: diff --git a/benchmarks/benchmarks/__init__.py b/benchmarks/benchmarks/__init__.py index 8efa67de33eb..6aa85c22f614 100644 --- a/benchmarks/benchmarks/__init__.py +++ b/benchmarks/benchmarks/__init__.py @@ -42,7 +42,7 @@ def dirty_lock(lock_name, lock_on_count=1): count = 0 f.seek(0) f.truncate() - f.write(f"{str(count)} {str(ppid)}") + f.write(f"{count} {ppid}") except OSError: pass return False diff --git a/benchmarks/benchmarks/bench_core.py b/benchmarks/benchmarks/bench_core.py index 632318d61084..6d5076434e90 100644 --- a/benchmarks/benchmarks/bench_core.py +++ b/benchmarks/benchmarks/bench_core.py @@ -170,9 +170,34 @@ def time_count_nonzero_multi_axis(self, numaxes, size, dtype): self.x.ndim - 1, self.x.ndim - 2)) +class Nonzero(Benchmark): + params = [ + [bool, np.uint8, np.uint64, np.int64, np.float32, np.float64], + [(1_000_000,), (1000, 1000), (100, ), (2, )] + ] + param_names = ["dtype", "shape"] + + def setup(self, dtype, size): + self.x = np.random.randint(0, 3, size=size).astype(dtype) + self.x_sparse = np.zeros(size).astype(dtype) + self.x_sparse[1] = 1 + self.x_sparse[-1] = 1 + self.x_dense = np.ones(size).astype(dtype) + + def time_nonzero(self, dtype, size): + np.nonzero(self.x) + + def time_nonzero_sparse(self, dtype, size): + np.nonzero(self.x_sparse) + + def time_nonzero_dense(self, dtype, size): + np.nonzero(self.x_dense) + + class PackBits(Benchmark): param_names = ['dtype'] params = [[bool, np.uintp]] + def setup(self, dtype): self.d = np.ones(10000, dtype=dtype) self.d2 = np.ones((200, 1000), dtype=dtype) diff --git a/benchmarks/benchmarks/bench_io.py b/benchmarks/benchmarks/bench_io.py index e316d07f3582..80b3739e0be9 100644 --- a/benchmarks/benchmarks/bench_io.py +++ b/benchmarks/benchmarks/bench_io.py @@ -88,7 +88,7 @@ def setup(self, num_lines): # unfortunately, timeit will only run setup() # between repeat events, but not for iterations # within repeats, so the StringIO object - # will have to be rewinded in the benchmark proper + # will have to be rewound in the benchmark proper self.data_comments = 
StringIO('\n'.join(data)) def time_comment_loadtxt_csv(self, num_lines): diff --git a/benchmarks/benchmarks/bench_linalg.py b/benchmarks/benchmarks/bench_linalg.py index f3eb819c1803..8785a37d7d27 100644 --- a/benchmarks/benchmarks/bench_linalg.py +++ b/benchmarks/benchmarks/bench_linalg.py @@ -72,7 +72,7 @@ def time_tensordot_a_b_axes_1_0_0_1(self): class Linalg(Benchmark): - params = sorted(list(set(TYPES1) - set(['float16']))) + params = sorted(set(TYPES1) - set(['float16'])) param_names = ['dtype'] def setup(self, typename): @@ -111,7 +111,7 @@ def time_norm_small_array(self): def time_det_small_array(self): np.linalg.det(self.array_5_5) - + class Lstsq(Benchmark): def setup(self): self.a = get_squares_()['float64'] diff --git a/benchmarks/benchmarks/bench_ma.py b/benchmarks/benchmarks/bench_ma.py index f17da1a9ebe1..2f369ac22e85 100644 --- a/benchmarks/benchmarks/bench_ma.py +++ b/benchmarks/benchmarks/bench_ma.py @@ -213,7 +213,7 @@ def time_methods_getitem(self, margs, msize): mdat = self.nmxs elif msize == 'big': mdat = self.nmxl - getattr(mdat, '__getitem__')(margs) + mdat.__getitem__(margs) class MAMethodSetItem(Benchmark): @@ -235,7 +235,7 @@ def time_methods_setitem(self, margs, mset, msize): mdat = self.nmxs elif msize == 'big': mdat = self.nmxl - getattr(mdat, '__setitem__')(margs, mset) + mdat.__setitem__(margs, mset) class Where(Benchmark): diff --git a/benchmarks/benchmarks/bench_polynomial.py b/benchmarks/benchmarks/bench_polynomial.py index ab2e95b7d1ab..fed079434c46 100644 --- a/benchmarks/benchmarks/bench_polynomial.py +++ b/benchmarks/benchmarks/bench_polynomial.py @@ -22,8 +22,8 @@ def time_polynomial_evaluation_array_3(self): def time_polynomial_evaluation_array_1000(self): self.polynomial_degree2(self.array1000) - + def time_polynomial_addition(self): _ = self.polynomial_degree2 + self.polynomial_degree2 - + diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py index 3545d939cf36..895c8e931590 100644 --- a/benchmarks/benchmarks/bench_ufunc.py +++ b/benchmarks/benchmarks/bench_ufunc.py @@ -16,12 +16,12 @@ 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', 'left_shift', 'less', 'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', 'logical_or', - 'logical_xor', 'matmul', 'maximum', 'minimum', 'mod', 'modf', - 'multiply', 'negative', 'nextafter', 'not_equal', 'positive', + 'logical_xor', 'matmul', 'matvec', 'maximum', 'minimum', 'mod', + 'modf', 'multiply', 'negative', 'nextafter', 'not_equal', 'positive', 'power', 'rad2deg', 'radians', 'reciprocal', 'remainder', 'right_shift', 'rint', 'sign', 'signbit', 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan', 'tanh', - 'true_divide', 'trunc', 'vecdot'] + 'true_divide', 'trunc', 'vecdot', 'vecmat'] arrayfuncdisp = ['real', 'round'] for name in ufuncs: @@ -50,7 +50,7 @@ def setup(self, ufuncname): try: self.afdn = getattr(np, ufuncname) except AttributeError: - raise NotImplementedError() + raise NotImplementedError self.args = [] for _, aarg in get_squares_().items(): arg = (aarg,) * 1 # no nin @@ -97,7 +97,7 @@ def setup(self, ufuncname): try: self.ufn = getattr(np, ufuncname) except AttributeError: - raise NotImplementedError() + raise NotImplementedError self.args = [] for _, aarg in get_squares_().items(): arg = (aarg,) * self.ufn.nin @@ -258,7 +258,7 @@ def time_methods_getitem(self, margs, msize): mdat = self.xs elif msize == 'big': mdat = self.xl - getattr(mdat, '__getitem__')(margs) + mdat.__getitem__(margs) class 
NDArraySetItem(Benchmark):
@@ -332,7 +332,7 @@ def setup(self, ufuncname):
         try:
             self.f = getattr(np, ufuncname)
         except AttributeError:
-            raise NotImplementedError()
+            raise NotImplementedError
         self.array_5 = np.array([1., 2., 10., 3., 4.])
         self.array_int_3 = np.array([1, 2, 3])
         self.float64 = np.float64(1.1)
@@ -597,7 +597,7 @@ def setup(self, dtype):
         N = 1000000
         self.a = np.random.randint(20, size=N).astype(dtype)
         self.b = np.random.randint(4, size=N).astype(dtype)
-
+
     def time_pow(self, dtype):
         np.power(self.a, self.b)
diff --git a/benchmarks/benchmarks/common.py b/benchmarks/benchmarks/common.py
index 5cbc2f38f31d..80957d634cab 100644
--- a/benchmarks/benchmarks/common.py
+++ b/benchmarks/benchmarks/common.py
@@ -1,6 +1,5 @@
 import numpy as np
 import random
-import os
 from functools import lru_cache
 from pathlib import Path
diff --git a/doc/Makefile b/doc/Makefile
index 57d063e9c936..910da1e06e61 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -118,12 +118,14 @@ endif
 	tar -C build/merge/$(TAG) -xf build/dist.tar.gz
 	git -C build/merge add $(TAG)
 	@# For now, the user must do this. If it is onerous, automate it and change
-	@# the instructions in doc/HOWTO_RELEASE.rst
+	@# the instructions in doc/RELEASE_WALKTHROUGH.rst
 	@echo " "
 	@echo New documentation archive added to ./build/merge.
 	@echo Now add/modify the appropriate section after
 	@echo "  <!-- insert here -->"
 	@echo in build/merge/index.html,
+	@echo change _static/versions.json,
+	@echo and run \"python3 update.py\"
 	@echo then \"git commit\", \"git push\"
diff --git a/doc/RELEASE_WALKTHROUGH.rst b/doc/RELEASE_WALKTHROUGH.rst
index c82adf221057..702803172477 100644
--- a/doc/RELEASE_WALKTHROUGH.rst
+++ b/doc/RELEASE_WALKTHROUGH.rst
@@ -1,7 +1,7 @@
-This is a walkthrough of the NumPy 1.21.0 release on Linux, modified for
+This is a walkthrough of the NumPy 2.1.0 release on Linux, modified for
 building with GitHub Actions and cibuildwheels and uploading to the
 `anaconda.org staging repository for NumPy <https://anaconda.org/multibuild-wheels-staging/numpy/files>`_.
-The commands can be copied into the command line, but be sure to replace 1.21.0
+The commands can be copied into the command line, but be sure to replace 2.1.0
 by the correct version. This should be read together with the
 :ref:`general release guide `.
@@ -29,13 +29,13 @@ Add/drop Python versions
 
 When adding or dropping Python versions, three files need to be edited:
 
 - .github/workflows/wheels.yml  # for github cibuildwheel
-- .travis.yml  # for cibuildwheel aarch64 builds
-- setup.py  # for classifier and minimum version check.
+- tools/ci/cirrus_wheels.yml  # for cibuildwheel aarch64/arm64 builds
+- pyproject.toml  # for classifier and minimum version check.
 
 Make these changes in an ordinary PR against main and backport if necessary.
-Using the `BLD:` prefix (build label) for the commit summary will cause the
-wheel builds to be run so that the changes will be tested, We currently release
-wheels for new Python versions after the first Python rc once manylinux and
+Add ``[wheel build]`` at the end of the title line of the commit summary so
+that wheel builds will be run to test the changes. We currently release wheels
+for new Python versions after the first Python rc once manylinux and
 cibuildwheel support it. For Python 3.11 we were able to release within a week
 of the rc1 announcement.
@@ -44,34 +44,54 @@ Backport pull requests
 ----------------------
 
 Changes that have been marked for this release must be backported to the
-maintenance/1.21.x branch.
+maintenance/2.1.x branch.
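The backport itself is ordinary ``git`` cherry-picking onto the maintenance
branch. As a minimal sketch only (the helper name, the ``upstream`` remote, and
the commit SHA are placeholders, not values from this diff), the flow can be
driven from Python in the spirit of the ``spin.util.run`` helpers in
``.spin/cmds.py`` above::

    import subprocess

    def backport(sha: str, branch: str = "maintenance/2.1.x") -> None:
        """Cherry-pick commit ``sha`` onto a topic branch cut from ``branch``."""
        # Refresh the maintenance branch before branching off it.
        subprocess.run(["git", "fetch", "upstream", branch], check=True)
        subprocess.run(
            ["git", "checkout", "-b", f"backport-{sha[:8]}", f"upstream/{branch}"],
            check=True,
        )
        # -x records "(cherry picked from commit ...)" in the new commit message.
        subprocess.run(["git", "cherry-pick", "-x", sha], check=True)
        # Push the topic branch and open a PR against the maintenance branch.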
+
+Update 2.1.0 milestones
+-----------------------
+
+Look at the issues/prs with 2.1.0 milestones and either push them off to a
+later version, or maybe remove the milestone. You may need to add a milestone.
 
 
 Make a release PR
 =================
 
-Five documents usually need to be updated or created for the release PR:
+Four documents usually need to be updated or created for the release PR:
 
 - The changelog
-- The release-notes
+- The release notes
 - The ``.mailmap`` file
 - The ``pyproject.toml`` file
-- The ``pyproject.toml.setuppy`` file  # 1.26.x only
 
 These changes should be made in an ordinary PR against the maintenance branch.
-The commit message should contain a ``[wheel build]`` directive to test if the
+The commit heading should contain a ``[wheel build]`` directive to test if the
 wheels build. Other small, miscellaneous fixes may be part of this PR. The
 commit message might be something like::
 
-    REL: Prepare for the NumPy 1.20.0 release
+    REL: Prepare for the NumPy 2.1.0 release [wheel build]
 
-    - Create 1.20.0-changelog.rst.
-    - Update 1.20.0-notes.rst.
+    - Create 2.1.0-changelog.rst.
+    - Update 2.1.0-notes.rst.
     - Update .mailmap.
     - Update pyproject.toml
-    - Update pyproject.toml.setuppy
-
-    [wheel build]
+
+
+Set the release version
+-----------------------
+
+Check the ``pyproject.toml`` file and set the release version if needed::
+
+    $ gvim pyproject.toml
+
+
+Check the ``pavement.py`` and ``doc/source/release.rst`` files
+--------------------------------------------------------------
+
+Check that the ``pavement.py`` file points to the correct release notes. It should
+have been updated after the last release, but if not, fix it now. Also make
+sure that the notes have an entry in the ``release.rst`` file::
+
+    $ gvim pavement.py doc/source/release.rst
 
 
 Generate the changelog
 ======================
 
 The changelog is generated using the changelog tool::
 
-    $ spin changelog $GITHUB v1.20.0..maintenance/1.21.x > doc/changelog/1.21.0-changelog.rst
+    $ spin changelog $GITHUB v2.0.0..maintenance/2.1.x > doc/changelog/2.1.0-changelog.rst
 
 where ``GITHUB`` contains your GitHub access token. The text will need to be
 checked for non-standard contributor names and dependabot entries removed. It
@@ -95,36 +115,18 @@ Finish the release notes
 ------------------------
 
 If there are any release notes snippets in ``doc/release/upcoming_changes/``,
-run ``spin docs`` to build the docs, incorporate the contents of the generated
-``doc/source/release/notes-towncrier.rst`` file into the release notes file
-(e.g., ``doc/source/release/2.3.4-notes.rst``), and delete the now-processed
-snippets in ``doc/release/upcoming_changes/``. This is safe to do multiple
-times during a release cycle.
-
-The generated release note will always need some fixups, the introduction will
-need to be written, and significant changes should be called out. For patch
-releases the changelog text may also be appended, but not for the initial
-release as it is too long. Check previous release notes to see how this is
-done.
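For orientation before the ``spin notes`` step that follows: judging from the
``notes`` command in the ``.spin/cmds.py`` hunk earlier in this diff,
``spin notes`` is a thin wrapper around towncrier. A rough, simplified Python
equivalent of its core (the function name here is made up, and the error
handling is collapsed into a single exception)::

    import shutil
    import subprocess

    def build_notes(version: str) -> None:
        """Fold release-note snippets into the notes file via towncrier."""
        if shutil.which("towncrier") is None:
            raise RuntimeError("please install `towncrier` to use this command")
        # Equivalent of: towncrier build --version 2.1.0 --yes
        cmd = ["towncrier", "build", "--version", version, "--yes"]
        p = subprocess.run(cmd, capture_output=True, encoding="utf-8")
        if p.returncode != 0:
            raise RuntimeError(f"towncrier failed ({p.returncode}): {p.stderr}")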
-
+run ``spin notes``, which will incorporate the snippets into the
+``doc/source/release/notes-towncrier.rst`` file and delete the snippets::
 
-Set the release version
------------------------
-
-Check the ``pyproject.toml`` and ``pyproject.toml.setuppy`` files and set the
-release version if needed::
-
-    $ gvim pyproject.toml pyproject.toml.setuppy
-
-
-Check the ``pavement.py`` and ``doc/source/release.rst`` files
---------------------------------------------------------------
-
-Check that the ``pavement.py`` file points to the correct release notes. It should
-have been updated after the last release, but if not, fix it now. Also make
-sure that the notes have an entry in the ``release.rst`` file::
-
-    $ gvim pavement.py doc/source/release.rst
+    $ spin notes
+    $ gvim doc/source/release/notes-towncrier.rst doc/source/release/2.1.0-notes.rst
+
+Once the ``notes-towncrier`` contents have been incorporated into the release
+notes, the ``.. include:: notes-towncrier.rst`` directive can be removed. The
+notes will always need some fixups, the introduction will need to be written,
+and significant changes should be called out. For patch releases the changelog
+text may also be appended, but not for the initial release as it is too long.
+Check previous release notes to see how this is done.
 
 
 Release walkthrough
 ===================
@@ -143,8 +145,8 @@ isn't already present.
 
 Checkout the branch for the release, make sure it is up to date, and clean the
 repository::
 
-    $ git checkout maintenance/1.21.x
-    $ git pull upstream maintenance/1.21.x
+    $ git checkout maintenance/2.1.x
+    $ git pull upstream maintenance/2.1.x
     $ git submodule update
     $ git clean -xdfq
@@ -155,13 +157,13 @@ Sanity check::
 
 Tag the release and push the tag. This requires write permission for the numpy
 repository::
 
-    $ git tag -a -s v1.21.0 -m"NumPy 1.21.0 release"
-    $ git push upstream v1.21.0
+    $ git tag -a -s v2.1.0 -m"NumPy 2.1.0 release"
+    $ git push upstream v2.1.0
 
 If you need to delete the tag due to error::
 
-    $ git tag -d v1.21.0
-    $ git push --delete upstream v1.21.0
+    $ git tag -d v2.1.0
+    $ git push --delete upstream v2.1.0
 
 
 2. Build wheels
 ---------------
@@ -187,7 +189,7 @@ If a wheel build fails for unrelated reasons, you can rerun it individually:
   the build you want to rerun. On the left there is a list of wheel builds,
   select the one you want to rerun and on the resulting page hit the
   counterclockwise arrows button.
-- On cirrus we haven't figured it out.
+- On cirrus, log into cirrusci, look for the v2.1.0 tag and rerun the failed jobs.
 
 .. _`staging repository`: https://anaconda.org/multibuild-wheels-staging/numpy/files
 .. _`Wheel builder`: https://github.com/numpy/numpy/actions/workflows/wheels.yml
@@ -201,7 +203,7 @@ Anaconda staging directory using the ``tools/download-wheels.py`` script::
 
     $ cd ../numpy
     $ mkdir -p release/installers
-    $ python3 tools/download-wheels.py 1.21.0
+    $ python3 tools/download-wheels.py 2.1.0
 
 
 4. Generate the README files
 ----------------------------
@@ -221,7 +223,7 @@ after recent PyPI changes, version ``3.4.1`` was used here::
 
     $ cd ../numpy
     $ twine upload release/installers/*.whl
-    $ twine upload release/installers/numpy-1.21.0.tar.gz  # Upload last.
+    $ twine upload release/installers/*.gz  # Upload last.
 
 If one of the commands breaks in the middle, you may need to selectively upload
 the remaining files because PyPI does not allow the same file to be uploaded
@@ -235,18 +237,19 @@ chosen the zip archive.
 
 6. Upload files to GitHub
 -------------------------
 
-Go to `<https://github.com/numpy/numpy/releases>`_, there should be a ``v1.21.0
-tag``, click on it and hit the edit button for that tag. There are two ways to
-add files, using an editable text window and as binary uploads. Start by
-editing the ``release/README.md`` that is translated from the rst version using
-pandoc. Things that will need fixing: PR lines from the changelog, if included,
-are wrapped and need unwrapping, links should be changed to monospaced text.
-Then copy the contents to the clipboard and paste them into the text window. It
-may take several tries to get it look right. Then
-
-- Upload ``release/installers/numpy-1.21.0.tar.gz`` as a binary file.
+Go to `<https://github.com/numpy/numpy/releases>`_, there should be a ``v2.1.0
+tag``, click on it and hit the edit button for that tag and update the title to
+'v2.1.0 (<date>)'. There are two ways to add files, using an editable text
+window and as binary uploads. Start by editing the ``release/README.md`` that
+is translated from the rst version using pandoc. Things that will need fixing:
+PR lines from the changelog, if included, are wrapped and need unwrapping,
+links should be changed to monospaced text. Then copy the contents to the
+clipboard and paste them into the text window. It may take several tries to get
+it to look right. Then
+
+- Upload ``release/installers/numpy-2.1.0.tar.gz`` as a binary file.
 - Upload ``release/README.rst`` as a binary file.
-- Upload ``doc/changelog/1.21.0-changelog.rst`` as a binary file.
+- Upload ``doc/changelog/2.1.0-changelog.rst`` as a binary file.
 - Check the pre-release button if this is a pre-release.
 - Hit the ``{Publish,Update} release`` button at the bottom.
@@ -261,7 +264,7 @@ and most patch releases. ``make merge-doc`` clones the ``numpy/doc`` repo into
 ``doc/build/merge`` and updates it with the new documentation::
 
     $ git clean -xdfq
-    $ git co v1.21.0
+    $ git co v2.1.0
     $ rm -rf doc/build  # want version to be current
     $ python -m spin docs merge-doc --build
     $ pushd doc/build/merge
@@ -272,31 +275,29 @@ If the release series is a new one, you will need to add a new section to the
 
     $ gvim index.html +/'insert here'
 
 Further, update the version-switcher json file to add the new release and
-update the version marked `(stable)`::
+update the version marked ``(stable)`` and ``preferred``::
 
     $ gvim _static/versions.json
 
-Otherwise, only the ``zip`` link should be updated with the new tag name. Since
-we are no longer generating ``pdf`` files, remove the line for the ``pdf``
-files if present::
+Then run ``update.py`` to update the version in ``_static``::
 
-    $ gvim index.html +/'tag v1.21'
+    $ python3 update.py
 
 You can "test run" the new documentation in a browser to make sure the links
-work::
+work, although the version dropdown will not change; it pulls its information
+from ``numpy.org``::
 
     $ firefox index.html  # or google-chrome, etc.
 
 Update the stable link and update::
 
-    $ ln -sfn 1.21 stable
+    $ ln -sfn 2.1 stable
     $ ls -l  # check the link
 
 Once everything seems satisfactory, update, commit and upload the changes::
 
-    $ python3 update.py
-    $ git commit -a -m"Add documentation for v1.21.0"
-    $ git push
+    $ git commit -a -m"Add documentation for v2.1.0"
+    $ git push git@github.com:numpy/doc
     $ popd
@@ -306,22 +307,23 @@ Once everything seems satisfactory, update, commit and upload the changes::
 
 Create release notes for next release and edit them to set the version.
These notes will be a skeleton and have little content:: - $ cp doc/source/release/template.rst doc/source/release/1.21.1-notes.rst - $ gvim doc/source/release/1.21.1-notes.rst - $ git add doc/source/release/1.21.1-notes.rst + $ git checkout -b begin-2.1.1 maintenance/2.1.x + $ cp doc/source/release/template.rst doc/source/release/2.1.1-notes.rst + $ gvim doc/source/release/2.1.1-notes.rst + $ git add doc/source/release/2.1.1-notes.rst Add new release notes to the documentation release list and update the ``RELEASE_NOTES`` variable in ``pavement.py``:: $ gvim doc/source/release.rst pavement.py -Update the ``version`` in ``pyproject.toml`` and ``pyproject.toml.setuppy``:: +Update the ``version`` in ``pyproject.toml``:: - $ gvim pyproject.toml pyproject.toml.setuppy + $ gvim pyproject.toml Commit the result:: - $ git commit -a -m"MAINT: prepare 1.21.x for further development" + $ git commit -a -m"MAINT: Prepare 2.1.x for further development" $ git push origin HEAD Go to GitHub and make a PR. It should be merged quickly. @@ -335,7 +337,7 @@ This assumes that you have forked ``_:: $ cd ../numpy.org $ git checkout main $ git pull upstream main - $ git checkout -b announce-numpy-1.21.0 + $ git checkout -b announce-numpy-2.1.0 $ gvim content/en/news.md - For all releases, go to the bottom of the page and add a one line link. Look @@ -345,7 +347,7 @@ This assumes that you have forked ``_:: commit and push:: - $ git commit -a -m"announce the NumPy 1.21.0 release" + $ git commit -a -m"announce the NumPy 2.1.0 release" $ git push origin HEAD Go to GitHub and make a PR. @@ -364,26 +366,17 @@ BCC so that replies will not be sent to that list. 11. Post-release update main (skip for prereleases) --------------------------------------------------- -Checkout main and forward port the documentation changes:: +Checkout main and forward port the documentation changes. You may also want +to update these notes if procedures have changed or improved:: - $ git checkout -b post-1.21.0-release-update - $ git checkout maintenance/1.21.x doc/source/release/1.21.0-notes.rst - $ git checkout maintenance/1.21.x doc/changelog/1.21.0-changelog.rst - $ git checkout maintenance/1.21.x .mailmap # only if updated for release. + $ git checkout -b post-2.1.0-release-update main + $ git checkout maintenance/2.1.x doc/source/release/2.1.0-notes.rst + $ git checkout maintenance/2.1.x doc/changelog/2.1.0-changelog.rst + $ git checkout maintenance/2.1.x .mailmap # only if updated for release. $ gvim doc/source/release.rst # Add link to new notes $ git status # check status before commit - $ git commit -a -m"MAINT: Update main after 1.21.0 release." + $ git commit -a -m"MAINT: Update main after 2.1.0 release." $ git push origin HEAD Go to GitHub and make a PR. - -12. Update oldest-supported-numpy ---------------------------------- - -If this release is the first one to support a new Python version, or the first -to provide wheels for a new platform or PyPy version, the version pinnings -in https://github.com/scipy/oldest-supported-numpy should be updated. -Either submit a PR with changes to ``setup.cfg`` there, or open an issue with -info on needed changes. - diff --git a/doc/changelog/2.0.2-changelog.rst b/doc/changelog/2.0.2-changelog.rst new file mode 100644 index 000000000000..6622407dd8f6 --- /dev/null +++ b/doc/changelog/2.0.2-changelog.rst @@ -0,0 +1,45 @@ + +Contributors +============ + +A total of 13 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. 
+ +* Bruno Oliveira + +* Charles Harris +* Chris Sidebottom +* Christian Heimes + +* Christopher Sidebottom +* Mateusz Sokół +* Matti Picus +* Nathan Goldbaum +* Pieter Eendebak +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg +* Yair Chuchem + + +Pull requests merged +==================== + +A total of 19 pull requests were merged for this release. + +* `#27000 `__: REL: Prepare for the NumPy 2.0.1 release [wheel build] +* `#27001 `__: MAINT: prepare 2.0.x for further development +* `#27021 `__: BUG: cfuncs.py: fix crash when sys.stderr is not available +* `#27022 `__: DOC: Fix migration note for ``alltrue`` and ``sometrue`` +* `#27061 `__: BUG: use proper input and output descriptor in array_assign_subscript... +* `#27073 `__: BUG: Mirror VQSORT_ENABLED logic in Quicksort +* `#27074 `__: BUG: Bump Highway to latest master +* `#27077 `__: BUG: Off by one in memory overlap check +* `#27122 `__: BUG: Use the new ``npyv_loadable_stride_`` functions for ldexp and... +* `#27126 `__: BUG: Bump Highway to latest +* `#27128 `__: BUG: add missing error handling in public_dtype_api.c +* `#27129 `__: BUG: fix another cast setup in array_assign_subscript +* `#27130 `__: BUG: Fix building NumPy in FIPS mode +* `#27131 `__: BLD: update vendored Meson for cross-compilation patches +* `#27146 `__: MAINT: Scipy openblas 0.3.27.44.4 +* `#27151 `__: BUG: Do not accidentally store dtype metadata in ``np.save`` +* `#27195 `__: REV: Revert undef I and document it +* `#27213 `__: BUG: Fix NPY_RAVEL_AXIS on backwards compatible NumPy 2 builds +* `#27279 `__: BUG: Fix array_equal for numeric and non-numeric scalar types diff --git a/doc/changelog/2.1.0-changelog.rst b/doc/changelog/2.1.0-changelog.rst new file mode 100644 index 000000000000..af7f5a3b07c7 --- /dev/null +++ b/doc/changelog/2.1.0-changelog.rst @@ -0,0 +1,592 @@ + +Contributors +============ + +A total of 110 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !ogidig5 + +* !partev +* !vahidmech + +* !h-vetinari +* Aaron Meurer +* Adrin Jalali + +* Agriya Khetarpal +* Ajay Kumar Janapareddi + +* Alex Herbert + +* Andras Deak +* Andrej Zhilenkov + +* Andrew Nelson +* Anne Gunn + +* Antony Lee +* Arnaud Ma + +* Arun Kannawadi + +* Arun Pa + +* Bas van Beek +* Ben Woodruff + +* Bruno Oliveira + +* Carlos Henrique Hermanny Moreira da Silva + +* Charles Harris +* Chris Sidebottom +* Christian Heimes + +* Christian Lorentzen +* Christopher Sidebottom +* Christopher Titchen + +* Clément Robert +* Cobalt Yang + +* Devyani Chavan + +* Dimitri Papadopoulos Orfanos +* Ebigide Jude + +* Eric Xie + +* Evgeni Burovski +* Fabian Vogt + +* Francisco Sousa + +* GUAN MING + +* Gabriel Fougeron + +* Gagandeep Singh +* Giovanni Del Monte + +* Gonzalo Tornaría + +* Gonçalo Bárias + +* Hugo van Kemenade +* Jakob Stevens Haas + +* Jakob Unfried + +* James Joseph Thomas + +* Jean Lecordier + +* Joren Hammudoglu + +* Joris Van den Bossche +* Julia Poo + +* Justus Magin +* Jyn Spring 琴春 +* KIU Shueng Chuan +* Karthik Gangula + +* Karthik Kaiplody + +* Kevin Sheppard +* Kristoffer Pedersen + +* Leo Singer +* Liang Yan +* Liangyu Zhang + +* Lucas Colley +* Luiz Eduardo Amaral + +* Lysandros Nikolaou +* Marcel Loose + +* Marten van Kerkwijk +* Mateusz Sokół +* Matt Haberland +* Matt Thompson + +* Matthew Roeschke + +* Matthew Thompson + +* Matthias Bussonnier +* Matti Picus +* Melissa Weber Mendonça +* Milica Dančuk + +* Moritz Schreiber + +* Nathan Goldbaum +* Olivier Grisel +* Patrick J. 
Roddy + +* Paul Juma Otieno + +* Pieter Eendebak +* Raghuveer Devulapalli +* Ralf Gommers +* Raquel Braunschweig + +* Robert Kern +* Rohit Goswami +* Romain Geissler + +* Ross Barnowski +* Rostan Tabet + +* Sam Morley + +* Sayed Adel +* Sean Cheah +* Sebastian Berg +* Serge Guelton +* Slobodan + +* Stefan van der Walt +* Thomas A Caswell +* Thomas Li +* Timo Röhling + +* Tsvika Shapira + +* Tuhin Sharma + +* Tyler Reddy +* Victor Eijkhout + +* Warren Weckesser +* Warrick Ball +* Xiangyi Wang + +* Yair Chuchem + +* Yang Liu + +* Yannik Wicke + +* Yevhen Amelin + +* Yuki K + +Pull requests merged +==================== + +A total of 469 pull requests were merged for this release. + +* `#12150 `__: ENH: When histogramming data with integer dtype, force bin width... +* `#24448 `__: TST: add some tests of np.log for complex input. +* `#25704 `__: DOC: quantile: correct/simplify documentation +* `#25705 `__: DOC: Add documentation explaining our promotion rules +* `#25781 `__: ENH: Convert fp32 sin/cos from C universal intrinsics to C++... +* `#25908 `__: ENH: Add center/ljust/rjust/zfill ufuncs for unicode and bytes +* `#25913 `__: NEP: NEP 55 updates and add @mhvk as an author +* `#25963 `__: BUG: Fix bug in numpy.pad() +* `#25964 `__: CI: fix last docbuild warnings +* `#25970 `__: MAINT: Prepare main for NumPy 2.1.0 development +* `#25971 `__: DOC: Fix a note section markup in ``dtype.rst`` +* `#25972 `__: DOC: Fix module setting of ``MaskedArray`` +* `#25974 `__: BUG: Raise error for negative-sized fixed-width dtype +* `#25975 `__: BUG: Fixes np.put receiving empty array causes endless loop +* `#25981 `__: BLD: push a tag builds a wheel +* `#25985 `__: BLD: omit pp39-macosx_arm64 from matrix +* `#25988 `__: DOC: Remove unused parameter description +* `#25990 `__: CI: clean up some unused `choco install` invocations +* `#25995 `__: CI: don't use ``fetch-tags`` in wheel build jobs +* `#25999 `__: BUG: fix kwarg handling in assert_warn [skip cirrus][skip azp] +* `#26000 `__: BUG: Filter out broken Highway platform +* `#26003 `__: MAINT: Bump pypa/cibuildwheel from 2.16.5 to 2.17.0 +* `#26005 `__: DOC: indicate stringdtype support in docstrings for string operations +* `#26006 `__: TST: remove usage of ProcessPoolExecutor in stringdtype tests +* `#26007 `__: MAINT: Remove sdist task from pavement.py +* `#26011 `__: DOC: mention the ``exceptions`` namespace in the 2.0.0 release... +* `#26012 `__: ENH: install StringDType promoter for add +* `#26014 `__: MAINT: remove the now-unused ``NPY_NO_SIGNAL`` +* `#26015 `__: MAINT: remove now-unused ``NPY_USE_C99_FORMAT`` +* `#26016 `__: MAINT: handle ``NPY_ALLOW_THREADS`` and related build option... +* `#26017 `__: MAINT: avoid use of flexible array member in public header +* `#26024 `__: BUG: raise error trying to coerce object arrays containing timedelta64('NaT')... 
+* `#26025 `__: BUG: fix reference count leak in __array__ internals +* `#26027 `__: BUG: add missing error handling in string to int cast internals +* `#26033 `__: MAINT: Remove partition and split-like functions from numpy.strings +* `#26045 `__: ENH: Optimize np.power for integer type +* `#26055 `__: ENH: Optimize np.power(x, 2) for double and float type +* `#26063 `__: MAINT,API: Const qualify some new API (mostly new DType API) +* `#26064 `__: MAINT: Make PyArrayMultiIterObject struct "smaller" +* `#26066 `__: BUG: Allow the new string dtype summation to work +* `#26067 `__: DOC: note stringdtype output support in np.strings docstrings +* `#26070 `__: DOC clarifications on debugging numpy +* `#26071 `__: BUG: fix logic error in stringdtype maximum/minimum ufunc +* `#26080 `__: BUG: adapt cython files to new complex declarations +* `#26081 `__: TYP: Make array _ShapeType bound and covariant +* `#26082 `__: ENH: Add partition/rpartition ufunc for string dtypes +* `#26083 `__: MAINT: Bump actions/cache from 4.0.1 to 4.0.2 +* `#26089 `__: TYP: Adjust typing for ``np.random.integers`` and ``np.random.randint`` +* `#26090 `__: API: Require reduce promoters to start with None to match +* `#26095 `__: MAINT: Bump actions/dependency-review-action from 4.1.3 to 4.2.3 +* `#26097 `__: DOC: Mention ``copy=True`` for ``__array__`` method in the migration... +* `#26099 `__: DOC: fix typo in doc/source/user/absolute_beginners.rst +* `#26103 `__: API: Default to hidden visibility for API tables +* `#26105 `__: MAINT: install all-string promoter for multiply +* `#26108 `__: MAINT: Remove unnecessarily defensive code from dlpack deleter +* `#26112 `__: TST: fix incorrect dtype in test +* `#26113 `__: BLD: Do not use -O3 flag when building in debug mode +* `#26116 `__: ENH: inherit numerical dtypes from abstract ones. +* `#26119 `__: BUG: fix reference counting error in stringdtype setup +* `#26123 `__: BUG: update pocketfft to unconditionaly disable use of aligned_alloc +* `#26125 `__: DOC: Bump pydata-sphinx-theme version +* `#26128 `__: DOC: Update absolute_beginners.rst +* `#26129 `__: MAINT: add missing noexcept clauses +* `#26130 `__: ENH: Optimize performance of np.atleast_1d +* `#26133 `__: MAINT: Bump actions/dependency-review-action from 4.2.3 to 4.2.4 +* `#26134 `__: CI, BLD: Push NumPy's Emscripten/Pyodide wheels nightly to Anaconda.org... +* `#26135 `__: BUG: masked array division should ignore all FPEs in mask calculation +* `#26136 `__: BUG: fixed datetime64[ns] conversion issue in numpy.vectorize,... +* `#26138 `__: MAINT: Bump actions/setup-python from 5.0.0 to 5.1.0 +* `#26139 `__: MAINT: Bump actions/dependency-review-action from 4.2.4 to 4.2.5 +* `#26142 `__: BUG,MAINT: Fix __array__ bugs and simplify code +* `#26147 `__: BUG: introduce PyArray_SafeCast to fix issues around stringdtype... 
+* `#26149 `__: MAINT: Escalate import warning to an import error +* `#26151 `__: BUG: Fix test_impossible_feature_enable failing without BASELINE_FEAT +* `#26155 `__: NEP: add NEP 56 mailing list resolution +* `#26160 `__: ENH: Improve performance of np.broadcast_arrays and np.broadcast_shapes +* `#26162 `__: BUG: Infinite Loop in numpy.base_repr +* `#26168 `__: DOC: mention np.lib.NumPyVersion in the 2.0 migration guide +* `#26172 `__: DOC, TST: make ``numpy.version`` officially public +* `#26174 `__: MAINT: Fix failure in routines.version.rst +* `#26182 `__: DOC: Update absolute_beginners.rst +* `#26185 `__: MAINT: Update Pyodide to 0.25.1 +* `#26187 `__: TST: Use platform.machine() for improved portability on riscv64 +* `#26189 `__: MNT: use pythoncapi_compat.h in npy_compat.h +* `#26190 `__: BUG: fix reference counting error in wrapping_method_resolve_descriptors +* `#26207 `__: TST: account for immortal objects in test_iter_refcount +* `#26210 `__: API: Readd ``np.bool_`` typing stub +* `#26212 `__: BENCH: Add benchmarks for np.power(x,2) and np.power(x,0.5) +* `#26213 `__: MNT: try updating pythoncapi-compat +* `#26215 `__: API: Enforce one copy for ``__array__`` when ``copy=True`` +* `#26219 `__: ENH: Enable RVV CPU feature detection +* `#26222 `__: MAINT: Drop Python 3.9 +* `#26227 `__: MAINT: utilize ufunc API const correctness internally +* `#26229 `__: TST: skip limited API test on nogil python build +* `#26232 `__: MAINT: fix typo in _add_newdoc_ufunc docstring +* `#26235 `__: Update numpy.any documentation example +* `#26237 `__: MAINT: Update ``array-api-tests`` job +* `#26239 `__: DOC: add versionadded for copy keyword in np.asarray docstring +* `#26241 `__: DOC: Fixup intp/uintp documentation for ssize_t/size_t changes +* `#26245 `__: DOC: Update ``__array__`` ``copy`` keyword docs +* `#26246 `__: MNT: migrate PyList_GetItem usages to PyList_GetItemRef +* `#26248 `__: MAINT,BUG: Robust string meson template substitution +* `#26251 `__: MNT: disable the allocator cache for nogil builds +* `#26258 `__: BLD: update to OpenBLAS 0.3.27 +* `#26260 `__: BUG: Ensure seed sequences are restored through pickling +* `#26261 `__: ENH: introduce a notion of "compatible" stringdtype instances +* `#26263 `__: MAINT: fix typo +* `#26264 `__: MAINT: fix typo in #include example +* `#26267 `__: MAINT: Update URL in nep 0014 - domain change +* `#26268 `__: API: Disallow 0D input arrays in ``nonzero`` +* `#26270 `__: BUG: ensure np.vectorize doesn't truncate fixed-width strings +* `#26273 `__: ENH: Bump Highway to HEAD and remove platform filter +* `#26274 `__: BLD: use install-tags to optionally install tests +* `#26280 `__: ENH: Speedup clip for floating point +* `#26281 `__: BUG: Workaround for Intel Compiler mask conversion bug +* `#26282 `__: MNT: replace _PyDict_GetItemStringWithError with PyDict_GetItemStringRef +* `#26284 `__: TST: run the smoke tests on more python versions +* `#26285 `__: ENH: Decrease wall time of ``ma.cov`` and ``ma.corrcoef`` +* `#26286 `__: BLD: ensure libnpymath and highway static libs use hidden visibility +* `#26292 `__: API: Add ``shape`` and ``copy`` arguments to ``numpy.reshape`` +* `#26294 `__: MNT: disable the coercion cache for the nogil build +* `#26295 `__: CI: add llvm/clang sanitizer tests +* `#26299 `__: MAINT: Pin sphinx to version 7.2.6 +* `#26302 `__: BLD: use newer openblas wheels [wheel build] +* `#26303 `__: DOC: add explanation of dtype to parameter values for np.append +* `#26304 `__: MAINT: address improper error handling and cleanup for 
``spin`` +* `#26309 `__: MAINT: Bump actions/upload-artifact from 4.3.1 to 4.3.2 +* `#26311 `__: DOC: Follow-up fixes for new theme +* `#26313 `__: MAINT: Cleanup ``vecdot``'s signature, typing, and importing +* `#26317 `__: BUG: use PyArray_SafeCast in array_astype +* `#26319 `__: BUG: fix spin bench not running on Windows +* `#26320 `__: DOC: Add replacement NEP links in superseded, replaced-by fields +* `#26322 `__: DOC: Documentation and examples for conversion of np.timedelta64... +* `#26324 `__: BUG: Fix invalid constructor in string_fastsearch.h with C++... +* `#26325 `__: TST: Skip Cython test for editable install +* `#26329 `__: MAINT: Bump actions/upload-artifact from 4.3.2 to 4.3.3 +* `#26338 `__: MAINT: update x86-simd-sort to latest +* `#26340 `__: DOC: Added small clarification note, based on discussion in issue... +* `#26347 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.0.3 to 3.0.4 +* `#26348 `__: NOGIL: Make loop data cache and dispatch cache thread-safe in... +* `#26353 `__: BUG: ensure text padding ufuncs handle stringdtype nan-like nulls +* `#26354 `__: BUG: Fix rfft for even input length. +* `#26355 `__: ENH: add support for nan-like null strings in string replace +* `#26359 `__: MAINT: Simplify bugfix for even rfft +* `#26362 `__: MAINT: Bump actions/dependency-review-action from 4.2.5 to 4.3.1 +* `#26363 `__: MAINT: Bump actions/dependency-review-action from 4.3.1 to 4.3.2 +* `#26364 `__: TST: static types are now immortal in the default build too +* `#26368 `__: [NOGIL] thread local promotion state +* `#26369 `__: DOC: fix np.unique release notes [skip cirrus] +* `#26372 `__: BUG: Make sure that NumPy scalars are supported by can_cast +* `#26377 `__: TYP: Fix incorrect type hint for creating a recarray from fromrecords +* `#26378 `__: DOC: Update internal links for generator.rst and related +* `#26384 `__: BUG: Fix incorrect return type of item with length 0 from chararray.__getitem__ +* `#26385 `__: DOC: Updated remaining links in random folder +* `#26386 `__: DOC: Improve example on array broadcasting +* `#26388 `__: BUG: Use Python pickle protocol version 4 for np.save +* `#26391 `__: DOC: Add missing methods to numpy.strings docs +* `#26392 `__: BUG: support nan-like null strings in [l,r]strip +* `#26396 `__: MNT: more gracefully handle spin adding arguments to functions... +* `#26399 `__: DOC: Update INSTALL.rst +* `#26413 `__: DOC: Fix some typos and incorrect markups +* `#26415 `__: MAINT: updated instructions to get MachAr byte pattern +* `#26416 `__: MAINT: Bump ossf/scorecard-action from 2.3.1 to 2.3.3 +* `#26418 `__: DOC: add reference docs for NpyString C API +* `#26419 `__: MNT: clean up references to array_owned==2 case in StringDType +* `#26426 `__: TYP,TST: Bump mypy to 1.10.0 +* `#26428 `__: MAINT: Bump pypa/cibuildwheel from 2.17.0 to 2.18.0 +* `#26429 `__: TYP: npyio: loadtxt: usecols: add None type +* `#26431 `__: TST: skip test_frompyfunc_leaks in the free-threaded build +* `#26432 `__: MAINT: Add some PR prefixes to the labeler. +* `#26436 `__: BUG: fixes for three related stringdtype issues +* `#26441 `__: BUG: int32 and intc should both appear in sctypes +* `#26442 `__: DOC: Adding links to polynomial table. 
+* `#26443 `__: TST: temporarily pin spin to work around issue in 0.9 release +* `#26444 `__: DOC: Remove outdated authentication instructions +* `#26445 `__: TST: fix xfailed tests on pypy 7.3.16 +* `#26447 `__: TST: attempt to fix intel SDE SIMD CI +* `#26449 `__: MAINT: fix typo +* `#26452 `__: DEP: Deprecate 'fix_imports' flag in numpy.save +* `#26456 `__: ENH: improve the error raised by ``numpy.isdtype`` +* `#26463 `__: TST: add basic free-threaded CI testing +* `#26464 `__: BLD: update vendored-meson to current Meson master (1.4.99) +* `#26469 `__: MAINT: Bump github/codeql-action from 2.13.4 to 3.25.5 +* `#26471 `__: BLD: cp313 [wheel build] +* `#26474 `__: BLD: Make NumPy build reproducibly +* `#26476 `__: DOC: Skip API documentation for numpy.distutils with Python 3.12... +* `#26478 `__: DOC: Set default as ``-j 1`` for spin docs and move ``-W`` to SPHINXOPTS +* `#26480 `__: TYP: fix type annotation for ``newbyteorder`` +* `#26481 `__: Improve documentation of numpy.ma.filled +* `#26486 `__: MAINT: Bump github/codeql-action from 3.25.5 to 3.25.6 +* `#26487 `__: MAINT: Bump pypa/cibuildwheel from 2.18.0 to 2.18.1 +* `#26488 `__: DOC: add examples to get_printoptions +* `#26489 `__: DOC: add example to get_include +* `#26492 `__: DOC: fix rng.random example in numpy-for-matlab-users +* `#26501 `__: ENH: Implement DLPack version 1 +* `#26503 `__: TST: work around flaky test on free-threaded build +* `#26504 `__: DOC: Copy-edit numpy 2.0 migration guide. +* `#26505 `__: DOC: update the NumPy Roadmap +* `#26507 `__: MAINT: mark temp elision address cache as thread local +* `#26511 `__: MAINT: Bump mamba-org/setup-micromamba from 1.8.1 to 1.9.0 +* `#26512 `__: CI: enable free-threaded wheel builds [wheel build] +* `#26514 `__: MAINT: Avoid gcc compiler warning +* `#26515 `__: MAINT: Fix GCC -Wmaybe-uninitialized warning +* `#26517 `__: DOC: Add missing functions to the migration guide +* `#26519 `__: MAINT: Avoid by-pointer parameter passing for LINEARIZE_DATA_t... +* `#26520 `__: BUG: Fix handling of size=() in Generator.choice when a.ndim... +* `#26524 `__: BUG: fix incorrect error handling for dtype('a') deprecation +* `#26526 `__: BUG: fix assert in PyArry_ConcatenateArrays with StringDType +* `#26529 `__: BUG: ``PyDataMem_SetHandler`` check capsule name +* `#26531 `__: BUG: Fix entry-point of Texinfo docs +* `#26534 `__: BUG: cast missing in PyPy-specific f2py code, pin spin in CI +* `#26537 `__: BUG: Fix F77 ! comment handling +* `#26538 `__: DOC: Update ``gradient`` docstrings +* `#26546 `__: MAINT: Remove redundant print from bug report issue template +* `#26548 `__: BUG: Fix typo in array-wrap code that lead to memory leak +* `#26550 `__: BUG: Make Polynomial evaluation adhere to nep 50 +* `#26552 `__: BUG: Fix in1d fast-path range +* `#26558 `__: BUG: fancy indexing copy +* `#26559 `__: BUG: fix setxor1d when input arrays aren't 1D +* `#26562 `__: MAINT: Bump mamba-org/setup-micromamba from 1.8.1 to 1.9.0 +* `#26563 `__: BUG: Fix memory leaks found with valgrind +* `#26564 `__: CI, BLD: Upgrade to Pyodide 0.26.0 for Emscripten/Pyodide CI... 
+* `#26566 `__: DOC: update ufunc tutorials to use setuptools +* `#26567 `__: BUG: fix memory leaks found with valgrind (next) +* `#26568 `__: MAINT: Unpin pydata-sphinx-theme +* `#26571 `__: DOC: Added web docs for missing ma and strings routines +* `#26572 `__: ENH: Add array API inspection functions +* `#26579 `__: ENH: Add unstack() +* `#26580 `__: ENH: Add copy and device keyword to np.asanyarray to match np.asarray +* `#26582 `__: BUG: weighted nanpercentile, nanquantile and multi-dim q +* `#26585 `__: MAINT: Bump github/codeql-action from 3.25.6 to 3.25.7 +* `#26586 `__: BUG: Fix memory leaks found by valgrind +* `#26589 `__: BUG: catch invalid fixed-width dtype sizes +* `#26594 `__: DOC: Update constants.rst: fix URL redirect +* `#26597 `__: ENH: Better error message for axis=None in ``np.put_along_axis``... +* `#26599 `__: ENH: use size-zero dtype for broadcast-shapes +* `#26602 `__: TST: Re-enable int8/uint8 einsum tests +* `#26603 `__: BUG: Disallow string inputs for ``copy`` keyword in ``np.array``... +* `#26604 `__: refguide-check with pytest as a runner +* `#26605 `__: DOC: fix typos in numpy v2.0 documentation +* `#26606 `__: DOC: Update randn() to use rng.standard_normal() +* `#26607 `__: MNT: Reorganize non-constant global statics into structs +* `#26609 `__: DOC: Updated notes and examples for np.insert. +* `#26610 `__: BUG: np.take handle 64-bit indices on 32-bit platforms +* `#26611 `__: MNT: Remove ``set_string_function`` +* `#26614 `__: MAINT: Bump github/codeql-action from 3.25.7 to 3.25.8 +* `#26619 `__: TST: Re-enable ``test_shift_all_bits`` on clang-cl +* `#26626 `__: DOC: add ``getbufsize`` example +* `#26627 `__: DOC: add ``setbufsize`` example +* `#26628 `__: DOC: add ``matrix_transpose`` example +* `#26629 `__: DOC: add ``unique_all`` example +* `#26630 `__: DOC: add ``unique_counts`` example +* `#26631 `__: DOC: add ``unique_inverse`` example +* `#26632 `__: DOC: add ``unique_values`` example +* `#26633 `__: DOC: fix ``matrix_transpose`` doctest +* `#26634 `__: BUG: Replace dots with underscores in f2py meson backend for... +* `#26636 `__: MAINT: Bump actions/dependency-review-action from 4.3.2 to 4.3.3 +* `#26637 `__: BUG: fix incorrect randomized parameterization in bench_linalg +* `#26638 `__: MNT: use reproducible RNG sequences in benchmarks +* `#26639 `__: MNT: more benchmark cleanup +* `#26641 `__: DOC: Update 2.0 migration guide +* `#26644 `__: DOC: Added clean_dirs to spin docs to remove generated folders +* `#26645 `__: DOC: Enable web docs for numpy.trapezoid and add back links +* `#26646 `__: DOC: Update docstring for invert function +* `#26655 `__: CI: modified CI job to test editable install +* `#26658 `__: MAINT: Bump pypa/cibuildwheel from 2.18.1 to 2.19.0 +* `#26662 `__: DOC: add CI and NEP commit acronyms +* `#26664 `__: CI: build and upload free-threaded nightly wheels for macOS +* `#26667 `__: BUG: Adds asanyarray to start of linalg.cross +* `#26670 `__: MAINT: Bump github/codeql-action from 3.25.8 to 3.25.9 +* `#26672 `__: CI: upgrade FreeBSD Cirrus job from FreeBSD 13.2 to 14.0 +* `#26675 `__: CI: Use default llvm on Windows. +* `#26676 `__: MAINT: mark evil_global_disable_warn_O4O8_flag as thread-local +* `#26679 `__: DOC: add ``np.linalg`` examples +* `#26680 `__: remove doctesting from refguide-check, add ``spin check-tutorials`` +* `#26684 `__: MAINT: Bump pypa/cibuildwheel from 2.19.0 to 2.19.1 +* `#26685 `__: MAINT: Bump github/codeql-action from 3.25.9 to 3.25.10 +* `#26686 `__: MAINT: Add comment lost in previous PR. 
+* `#26691 `__: BUILD: check for scipy-doctest, remove it from requirements +* `#26692 `__: DOC: document workaround for deprecation of dim-2 inputs to ``cross`` +* `#26693 `__: BUG: allow replacement in the dispatch cache +* `#26702 `__: DOC: Added missing See Also sections in Polynomial module +* `#26703 `__: BUG: Handle ``--f77flags`` and ``--f90flags`` for ``meson`` +* `#26706 `__: TST: Skip an f2py module test on Windows +* `#26714 `__: MAINT: Update main after 2.0.0 release. +* `#26716 `__: DOC: Add clarifications np.argpartition +* `#26717 `__: DOC: Mention more error paths and try to consolidate import errors +* `#26721 `__: DOC, MAINT: Turn on version warning banner provided by PyData... +* `#26722 `__: DOC: Update roadmap a bit more +* `#26724 `__: ENH: Add Array API 2023.12 version support +* `#26737 `__: DOC: Extend release notes for #26611 +* `#26739 `__: DOC: Update NEPs statuses +* `#26741 `__: DOC: Remove mention of NaN and NAN aliases from constants +* `#26742 `__: DOC: Mention '1.25' legacy printing mode in ``set_printoptions`` +* `#26744 `__: BUG: Fix new DTypes and new string promotion when signature is... +* `#26750 `__: ENH: Add locking to umath_linalg if no lapack is detected at... +* `#26760 `__: TYP: fix incorrect import in ``ma/extras.pyi`` stub +* `#26762 `__: BUG: fix max_rows and chunked string/datetime reading in ``loadtxt`` +* `#26766 `__: ENH: Support integer dtype inputs in rounding functions +* `#26769 `__: BUG: Quantile closest_observation to round to nearest even order +* `#26770 `__: DOC, NEP: Update NEP44 +* `#26771 `__: BUG: fix PyArray_ImportNumPyAPI under -Werror=strict-prototypes +* `#26776 `__: BUG: remove numpy.f2py from excludedimports +* `#26780 `__: MAINT: use an atomic load/store and a mutex to initialize the... +* `#26788 `__: TYP: fix missing ``sys`` import in numeric.pyi +* `#26789 `__: BUG: avoid side-effect of 'include complex.h' +* `#26790 `__: DOC: Update link to Python stdlib random. +* `#26795 `__: BUG: add order to out array of ``numpy.fft`` +* `#26797 `__: BLD: Fix x86-simd-sort build failure on openBSD +* `#26799 `__: MNT: Update dlpack docs and typing stubs +* `#26802 `__: Missing meson pass-through argument +* `#26805 `__: DOC: Update 2.0 migration guide and release note +* `#26808 `__: DOC: Change selected hardlinks to NEPs to intersphinx mappings +* `#26811 `__: DOC: update notes on sign for complex numbers +* `#26812 `__: CI,TST: Fix meson tests needing gfortran [wheel build] +* `#26813 `__: TST: fix 'spin test single_test' for future versions of spin +* `#26814 `__: DOC: Add ``>>> import numpy as np`` stubs everywhere +* `#26815 `__: MAINT: Bump github/codeql-action from 3.25.10 to 3.25.11 +* `#26826 `__: DOC: remove hack to override _add_newdocs_scalars +* `#26827 `__: DOC: AI-Gen examples ctypeslib.as_ctypes_types +* `#26828 `__: DOC: AI generated examples for ma.left_shift. +* `#26829 `__: DOC: AI-Gen examples for ma.put +* `#26830 `__: DOC: AI generated examples for ma.reshape +* `#26831 `__: DOC: AI generated examples for ma.correlate. 
+* `#26833 `__: MAINT: Bump pypa/cibuildwheel from 2.19.1 to 2.19.2 +* `#26841 `__: BENCH: Missing ufunc in benchmarks +* `#26842 `__: BUILD: clean out py2 stuff from npy_3kcompat.h +* `#26846 `__: MAINT: back printoptions with a true context variable +* `#26847 `__: TYP: fix ``ufunc`` method type annotations +* `#26848 `__: TYP: include the ``|`` prefix for ``dtype`` char codes +* `#26849 `__: BUG: Mismatched allocation domains in ``PyArray_FillWithScalar`` +* `#26858 `__: TYP: Annotate type aliases as ``typing.TypeAlias`` +* `#26866 `__: MAINT: Bump actions/upload-artifact from 4.3.3 to 4.3.4 +* `#26867 `__: TYP,BUG: fix ``numpy.__dir__`` annotations +* `#26871 `__: TYP: adopt ``typing.LiteralString`` and use more of ``typing.Literal`` +* `#26872 `__: TYP: use ``types.CapsuleType`` on python>=3.13 +* `#26873 `__: TYP: improved ``numpy._array_api_info`` typing +* `#26875 `__: TYP,BUG: Replace ``numpy._typing._UnknownType`` with ``typing.Never`` +* `#26877 `__: BUG: start applying ruff/flake8-implicit-str-concat rules (ISC) +* `#26879 `__: MAINT: start applying ruff/flake8-simplify rules (SIM) +* `#26880 `__: DOC: Fix small incorrect markup +* `#26881 `__: DOC, MAINT: fix typos found by codespell +* `#26882 `__: MAINT: start applying ruff/pyupgrade rules (UP) +* `#26883 `__: BUG: Make issctype always return bool. +* `#26884 `__: MAINT: Remove a redundant import from the generated __ufunc_api.h. +* `#26889 `__: API: Add ``device`` and ``to_device`` to scalars +* `#26891 `__: DOC: Add a note that one should free the proto struct +* `#26892 `__: ENH: Allow use of clip with Python integers to always succeed +* `#26894 `__: MAINT: Bump actions/setup-node from 4.0.2 to 4.0.3 +* `#26895 `__: DOC: Change documentation copyright strings to use a dynamic... +* `#26896 `__: DOC: Change NEP hardlinks to intersphinx mappings. +* `#26897 `__: TYP: type hint ``numpy.polynomial`` +* `#26901 `__: BUG: ``np.loadtxt`` return F_CONTIGUOUS ndarray if row size is... +* `#26902 `__: Apply some ruff/flake8-bugbear rules (B004 and B005) +* `#26903 `__: BUG: Fix off-by-one error in amount of characters in strip +* `#26904 `__: BUG,ENH: Fix generic scalar infinite recursion issues +* `#26905 `__: API: Do not consider subclasses for NEP 50 weak promotion +* `#26906 `__: MAINT: Bump actions/setup-python from 5.1.0 to 5.1.1 +* `#26908 `__: ENH: Provide a hook for gufuncs to process core dimensions. +* `#26913 `__: MAINT: declare that NumPy's C extensions support running without... +* `#26914 `__: API: Partially revert unique with return_inverse +* `#26919 `__: BUG,MAINT: Fix utf-8 character stripping memory access +* `#26923 `__: MAINT: Bump actions/dependency-review-action from 4.3.3 to 4.3.4 +* `#26924 `__: MAINT: Bump github/codeql-action from 3.25.11 to 3.25.12 +* `#26927 `__: TYP: Transparent ``__array__`` shape-type +* `#26928 `__: TYP: Covariant ``numpy.flatiter`` type parameter +* `#26929 `__: TYP: Positional-only dunder binop method parameters +* `#26930 `__: BUG: Fix out-of-bound minimum offset for in1d table method +* `#26931 `__: DOC, BUG: Fix running full test command in docstring +* `#26934 `__: MAINT: add PyArray_ZeroContiguousBuffer helper and use it in... +* `#26935 `__: BUG: fix ``f2py`` tests to work with v2 API +* `#26937 `__: TYP,BUG: Remove ``numpy.cast`` and ``numpy.disp`` from the typing... +* `#26938 `__: TYP,BUG: Fix ``dtype`` type alias specialization issue in ``__init__.pyi`` +* `#26942 `__: TYP: Improved ``numpy.generic`` rich comparison operator type... 
+* `#26943 `__: TYP,BUG: Remove non-existant ``numpy.__git_version__`` in the... +* `#26946 `__: TYP: Add missing typecodes in ``numpy._core.numerictypes.typecodes`` +* `#26950 `__: MAINT: add freethreading_compatible directive to cython build +* `#26953 `__: TYP: Replace ``typing.Union`` with ``|`` in ``numpy._typing`` +* `#26954 `__: TYP: Replace ``typing.Optional[T]`` with ``T | None`` in the... +* `#26964 `__: DOC: Issue template for static typing +* `#26968 `__: MAINT: add a 'tests' install tag to the `numpy._core._simd` extension... +* `#26969 `__: BUG: Fix unicode strip +* `#26972 `__: BUG: Off by one in memory overlap check +* `#26975 `__: TYP: Use ``Final`` and ``LiteralString`` for the constants in... +* `#26980 `__: DOC: add sphinx-copybutton +* `#26981 `__: ENH: add support in f2py to declare gil-disabled support +* `#26983 `__: TYP,BUG: Type annotations for ``numpy.trapezoid`` +* `#26984 `__: TYP,BUG: Fix potentially unresolved typevar in ``median`` and... +* `#26985 `__: BUG: Add object cast to avoid warning with limited API +* `#26989 `__: DOC: fix ctypes example +* `#26991 `__: MAINT: mark scipy-openblas nightly tests as allowed to fail +* `#26992 `__: TYP: Covariant ``numpy.ndenumerate`` type parameter +* `#26993 `__: TYP,BUG: FIx ``numpy.ndenumerate`` annotations for ``object_``... +* `#26996 `__: ENH: Add ``__slots__`` to private (sub-)classes in ``numpy.lib._index_tricks_impl`` +* `#27002 `__: MAINT: Update main after 2.0.1 release. +* `#27008 `__: TYP,BUG: Complete type stubs for ``numpy.dtypes`` +* `#27009 `__: TST, MAINT: Loosen required test precision +* `#27010 `__: DOC: update tutorials link +* `#27011 `__: MAINT: replace PyThread_type_lock with PyMutex on Python >= 3.13.0b3 +* `#27013 `__: BUG: cfuncs.py: fix crash when sys.stderr is not available +* `#27014 `__: BUG: fix gcd inf +* `#27015 `__: DOC: Fix migration note for ``alltrue`` and ``sometrue`` +* `#27017 `__: DOC: Release note for feature added in gh-26908. +* `#27019 `__: TYP: improved ``numpy.array`` type hints for array-like input +* `#27025 `__: DOC: Replace np.matrix in .view() docstring example. +* `#27026 `__: DOC: fix tiny typo +* `#27027 `__: BUG: Fix simd loadable stride logic +* `#27031 `__: DOC: document 'floatmode' and 'legacy' keys from np.get_printoptions'... +* `#27034 `__: BUG: random: Fix edge case of Johnk's algorithm for the beta... +* `#27041 `__: MAINT: Bump github/codeql-action from 3.25.12 to 3.25.14 +* `#27043 `__: CI: unify free-threaded wheel builds with other builds +* `#27046 `__: BUG: random: prevent zipf from hanging when parameter is large. +* `#27047 `__: BUG: use proper input and output descriptor in array_assign_subscript... +* `#27048 `__: BUG: random: Fix long delays/hangs with zipf(a) when a near 1. +* `#27050 `__: BUG: Mirror VQSORT_ENABLED logic in Quicksort +* `#27051 `__: TST: Refactor to consistently use CompilerChecker +* `#27052 `__: TST: fix issues with tests that use numpy.testing.extbuild +* `#27055 `__: MAINT: Bump ossf/scorecard-action from 2.3.3 to 2.4.0 +* `#27056 `__: MAINT: Bump github/codeql-action from 3.25.14 to 3.25.15 +* `#27057 `__: BUG: fix another cast setup in array_assign_subscript +* `#27058 `__: DOC: Add some missing examples for ``np.strings`` methods +* `#27059 `__: ENH: Disable name suggestions on some AttributeErrors +* `#27060 `__: MAINT: linalg: Simplify some linalg gufuncs. +* `#27070 `__: BUG: Bump Highway to latest master +* `#27076 `__: DEP: lib: Deprecate acceptance of float (and more) in bincount. 
+* `#27079 `__: MAINT: 3.9/10 cleanups +* `#27081 `__: CI: Upgrade ``array-api-tests`` +* `#27085 `__: ENH: fixes for warnings on free-threaded wheel builds +* `#27087 `__: ENH: mark the dragon4 scratch space as thread-local +* `#27090 `__: DOC: update np.shares_memory() docs +* `#27091 `__: API,BUG: Fix copyto (and ufunc) handling of scalar cast safety +* `#27094 `__: DOC: Add release note about deprecation introduced in gh-27076. +* `#27095 `__: DOC: Fix indentation of a few release notes. +* `#27096 `__: BUG: Complex printing tests fail on Windows ARM64 +* `#27097 `__: MAINT: Bump actions/upload-artifact from 4.3.4 to 4.3.5 +* `#27098 `__: BUG: add missing error handling in public_dtype_api.c +* `#27102 `__: DOC: Fixup promotion doc +* `#27104 `__: BUG: Fix building NumPy in FIPS mode +* `#27108 `__: DOC: remove incorrect docstring comment +* `#27110 `__: BLD: cp313 cp313t linux_aarch64 [wheel build] +* `#27112 `__: BUG: Fix repr for integer scalar subclasses +* `#27113 `__: DEV: make linter.py runnable from outside the root of the repo +* `#27114 `__: MAINT: Bump pypa/cibuildwheel from 2.19.2 to 2.20.0 +* `#27115 `__: BUG: Use the new ``npyv_loadable_stride_`` functions for ldexp and... +* `#27117 `__: BUG: Ensure that scalar binops prioritize __array_ufunc__ +* `#27118 `__: BLD: update vendored Meson for cross-compilation patches +* `#27123 `__: BUG: Bump Highway to latest +* `#27124 `__: MAINT: Bump github/codeql-action from 3.25.15 to 3.26.0 +* `#27125 `__: MAINT: Bump actions/upload-artifact from 4.3.5 to 4.3.6 +* `#27127 `__: BUG: Fix missing error return in copyto +* `#27144 `__: MAINT: Scipy openblas 0.3.27.44.4 +* `#27149 `__: BUG: Do not accidentally store dtype metadata in ``np.save`` +* `#27162 `__: BLD: use smaller scipy-openblas builds +* `#27166 `__: ENH: fix thread-unsafe C API usages +* `#27173 `__: MAINT: Bump pythoncapi-compat version. +* `#27176 `__: REL: Prepare for the NumPy 2.1.0rc1 release [wheel build] +* `#27180 `__: DOC: Add release notes for #26897 +* `#27181 `__: DOC: Add release notes for #27008 +* `#27190 `__: BUILD: use a shrunken version of scipy-openblas wheels [wheel... +* `#27193 `__: REV: Revert undef I and document it +* `#27196 `__: BUILD: improve download script +* `#27197 `__: MAINT: update default NPY_FEATURE_VERSION after dropping py39 +* `#27200 `__: DOC: add free-threading release notes +* `#27209 `__: BUG: Fix NPY_RAVEL_AXIS on backwards compatible NumPy 2 builds +* `#27216 `__: TYP: Fixed & improved type hints for ``numpy.histogram2d`` +* `#27217 `__: TYP: Fix incompatible overrides in the ``numpy._typing._ufunc``... +* `#27229 `__: BUG: Fix ``PyArray_ZeroContiguousBuffer`` (resize) with struct... +* `#27233 `__: DOC: add docs on thread safety in NumPy +* `#27234 `__: BUG: Allow fitting of degree zero polynomials with Polynomial.fit diff --git a/doc/changelog/2.1.1-changelog.rst b/doc/changelog/2.1.1-changelog.rst new file mode 100644 index 000000000000..d18636771e1a --- /dev/null +++ b/doc/changelog/2.1.1-changelog.rst @@ -0,0 +1,30 @@ + +Contributors +============ + +A total of 7 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Andrew Nelson +* Charles Harris +* Mateusz Sokół +* Maximilian Weigand + +* Nathan Goldbaum +* Pieter Eendebak +* Sebastian Berg + +Pull requests merged +==================== + +A total of 10 pull requests were merged for this release. 
+ +* `#27236 `__: REL: Prepare for the NumPy 2.1.0 release [wheel build] +* `#27252 `__: MAINT: prepare 2.1.x for further development +* `#27259 `__: BUG: revert unintended change in the return value of set_printoptions +* `#27266 `__: BUG: fix reference counting bug in __array_interface__ implementation… +* `#27267 `__: TST: Add regression test for missing descr in array-interface +* `#27276 `__: BUG: Fix #27256 and #27257 +* `#27278 `__: BUG: Fix array_equal for numeric and non-numeric scalar types +* `#27287 `__: MAINT: Update maintenance/2.1.x after the 2.0.2 release +* `#27303 `__: BLD: cp311- macosx_arm64 wheels [wheel build] +* `#27304 `__: BUG: f2py: better handle filtering of public/private subroutines diff --git a/doc/changelog/2.1.2-changelog.rst b/doc/changelog/2.1.2-changelog.rst new file mode 100644 index 000000000000..bd0f7bd2422c --- /dev/null +++ b/doc/changelog/2.1.2-changelog.rst @@ -0,0 +1,38 @@ + +Contributors +============ + +A total of 11 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Chris Sidebottom +* Ishan Koradia + +* João Eiras + +* Katie Rust + +* Marten van Kerkwijk +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Pieter Eendebak +* Slava Gorloff + + +Pull requests merged +==================== + +A total of 14 pull requests were merged for this release. + +* `#27333 `__: MAINT: prepare 2.1.x for further development +* `#27400 `__: BUG: apply critical sections around populating the dispatch cache +* `#27406 `__: BUG: Stub out get_build_msvc_version if distutils.msvccompiler... +* `#27416 `__: BUILD: fix missing include for std::ptrdiff_t for C++23 language... +* `#27433 `__: BLD: pin setuptools to avoid breaking numpy.distutils +* `#27437 `__: BUG: Allow unsigned shift argument for np.roll +* `#27439 `__: BUG: Disable SVE VQSort +* `#27471 `__: BUG: rfftn axis bug +* `#27479 `__: BUG: Fix extra decref of PyArray_UInt8DType. +* `#27480 `__: CI: use PyPI not scientific-python-nightly-wheels for CI doc... +* `#27481 `__: MAINT: Check for SVE support on demand +* `#27484 `__: BUG: initialize the promotion state to be weak +* `#27501 `__: MAINT: Bump pypa/cibuildwheel from 2.20.0 to 2.21.2 +* `#27506 `__: BUG: avoid segfault on bad arguments in ndarray.__array_function__ diff --git a/doc/changelog/2.1.3-changelog.rst b/doc/changelog/2.1.3-changelog.rst new file mode 100644 index 000000000000..073bd002e7ca --- /dev/null +++ b/doc/changelog/2.1.3-changelog.rst @@ -0,0 +1,49 @@ + +Contributors +============ + +A total of 15 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Abhishek Kumar + +* Austin + +* Benjamin A. Beasley + +* Charles Harris +* Christian Lorentzen +* Marcel Telka + +* Matti Picus +* Michael Davidsaver + +* Nathan Goldbaum +* Peter Hawkins +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg +* dependabot[bot] +* kp2pml30 + + +Pull requests merged +==================== + +A total of 21 pull requests were merged for this release. 
+ +* `#27512 `__: MAINT: prepare 2.1.x for further development +* `#27537 `__: MAINT: Bump actions/cache from 4.0.2 to 4.1.1 +* `#27538 `__: MAINT: Bump pypa/cibuildwheel from 2.21.2 to 2.21.3 +* `#27539 `__: MAINT: MSVC does not support #warning directive +* `#27543 `__: BUG: Fix user dtype can-cast with python scalar during promotion +* `#27561 `__: DEV: bump ``python`` to 3.12 in environment.yml +* `#27562 `__: BLD: update vendored Meson to 1.5.2 +* `#27563 `__: BUG: weighted quantile for some zero weights (#27549) +* `#27565 `__: MAINT: Use miniforge for macos conda test. +* `#27566 `__: BUILD: satisfy gcc-13 pendantic errors +* `#27569 `__: BUG: handle possible error for PyTraceMallocTrack +* `#27570 `__: BLD: start building Windows free-threaded wheels [wheel build] +* `#27571 `__: BUILD: vendor tempita from Cython +* `#27574 `__: BUG: Fix warning "differs in levels of indirection" in npy_atomic.h... +* `#27592 `__: MAINT: Update Highway to latest +* `#27593 `__: BUG: Adjust numpy.i for SWIG 4.3 compatibility +* `#27616 `__: BUG: Fix Linux QEMU CI workflow +* `#27668 `__: BLD: Do not set __STDC_VERSION__ to zero during build +* `#27669 `__: ENH: fix wasm32 runtime type error in numpy._core +* `#27672 `__: BUG: Fix a reference count leak in npy_find_descr_for_scalar. +* `#27673 `__: BUG: fixes for StringDType/unicode promoters diff --git a/doc/changelog/2.2.0-changelog.rst b/doc/changelog/2.2.0-changelog.rst new file mode 100644 index 000000000000..b82a3d03b4fc --- /dev/null +++ b/doc/changelog/2.2.0-changelog.rst @@ -0,0 +1,437 @@ + +Contributors +============ + +A total of 106 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !Dreamge + +* !bersbersbers + +* !fengluoqiuwu + +* !h-vetinari +* !hutauf + +* !musvaage + +* !nullSoup + +* Aarni Koskela + +* Abhishek Kumar + +* Abraham Medina + +* Aditi Juneja + +* Adrien Corenflos + +* Agriya Khetarpal +* Ajay Kumar Janapareddi +* Akula Guru Datta + +* Amit Subhash Chejara + +* Andrew Nelson +* Anne Gunn +* Austin Ran + +* Ben Walsh +* Benjamin A. 
Beasley + +* Benoit Prabel + +* Charles Harris +* Chris Fu (傅立业) +* Chris Sidebottom +* Christian Lorentzen +* Christopher Sidebottom +* Clément Robert +* Dane Reimers + +* Dimitri Papadopoulos Orfanos +* Evgeni Burovski +* GUAN MING +* Habiba Hye + +* Harry Zhang + +* Hugo van Kemenade +* Ian Harris + +* Isaac Warren + +* Ishan Koradia + +* Ishan Purekar + +* Jake VanderPlas +* Jianyu Wen + +* Johannes Kaisinger +* John Kirkham +* Joren Hammudoglu +* João Eiras + +* KM Khalid Saifullah + +* Karel Planken + +* Katie Rust + +* Khem Raj +* Kira Prokopenko + +* Lars Grüter +* Linus Sommer +* Lucas Colley +* Luiz Eduardo Amaral +* Luke Aarohi + +* Marcel Telka + +* Mark Harfouche +* Marten van Kerkwijk +* Maryanne Wachter + +* Mateusz Sokół +* Matt Haberland +* Matthias Diener + +* Matthieu Darbois +* Matti Picus +* Maximilian Weigand + +* Melissa Weber Mendonça +* Michael Davidsaver + +* Nathan Goldbaum +* Nicolas Tessore + +* Nitish Satyavolu + +* Oscar Armas-Luy + +* Peter Hawkins +* Peter Kämpf + +* Pieter Eendebak +* Raghu Rajan + +* Raghuveer Devulapalli +* Ralf Gommers +* Robert Kern +* Rohit Goswami +* Ross Barnowski +* Ryan Teoh + +* Santhana Mikhail Antony S + +* Sayed Adel +* Sebastian Berg +* Sebastian Vittersø + +* Sebin Thomas + +* Serge Panev + +* Shaurya Barkund + +* Shiv Katira + +* Simon Altrogge +* Slava Gorloff + +* Slobodan Miletic + +* Soutrik Bandyopadhyay + +* Stan Ulbrych + +* Stefan van der Walt +* Tim Hoffmann +* Timo Röhling +* Tyler Reddy +* Vahid Tavanashad + +* Victor Herdeiro + +* Vijayakumar Z + +* Warren Weckesser +* Xiao Yuan + +* Yashasvi Misra +* bilderbuchi + +* dependabot[bot] + +Pull requests merged +==================== + +A total of 317 pull requests were merged for this release. + +* `#14622 `__: BUG: fix datetime64/timedelta64 hash and match Python +* `#15181 `__: ENH: Add nd-support to trim_zeros +* `#17780 `__: ENH, BLD: Define RISCV-32 support +* `#23547 `__: DOC: Fix a typo in description and add an example of ``numpy.tensordot`` +* `#25984 `__: BUG: Allow fitting of degree zero polynomials with Polynomial.fit +* `#26398 `__: DOC: order of indices returned in tril_indices and triu_indices +* `#26406 `__: DOC: Changed vdot docs as suggested +* `#26570 `__: CI, BLD: Use ``cibuildwheel`` to build WASM NumPy wheels +* `#26642 `__: DOC: Add examples to ``np.char`` +* `#26855 `__: TYP: improved ``numpy.frompyfunc`` type hints +* `#26857 `__: MAINT: Start applying ruff/Pycodestyle rules +* `#26865 `__: TYP: add missing annotations for ``numpy.object_.__new__`` +* `#26941 `__: TYP: Non-distributive ``numpy.generic`` type args. +* `#26944 `__: TYP: Annotate ``numpy._core._type_aliases`` . +* `#26979 `__: TYP: Explicit ``numpy.__all__`` in the stubs +* `#26994 `__: TYP: Typing fixes for ``numpy.iinfo`` & ``numpy.finfo`` +* `#27049 `__: BUG: f2py: better handle filtering of public/private subroutines +* `#27088 `__: WHL: bump (musl) linux image [wheel build] +* `#27100 `__: TYP: Fixed & improved type hints for ``numpy.histogram2d`` +* `#27101 `__: TST, DOC: add doc and test for transpose axes with negative indices +* `#27116 `__: DOC: update NEP 50 draft status to "Final" +* `#27119 `__: ENH: Use ``PyObject_GetOptionalAttr`` +* `#27132 `__: TYP: Assume that ``typing_extensions`` is always available in... 
+* `#27134 `__: REL: Prepare main for 2.2.0 development +* `#27139 `__: TYP: Fixed & improved ``numpy.dtype.__new__`` +* `#27140 `__: MAINT: Scipy openblas 0.3.27.44.4 +* `#27143 `__: BUG: Do not accidentally store dtype metadata in ``np.save`` +* `#27145 `__: ENH: fix thread-unsafe C API usages +* `#27147 `__: BLD: use smaller scipy-openblas builds +* `#27148 `__: BUG: Raise if histogram cannot create finite bin sizes +* `#27150 `__: TYP: Sane defaults for the platform-specific ``NBitBase`` types. +* `#27152 `__: TYP: Simplified ufunc imports in ``numpy._typing`` +* `#27153 `__: TYP: Fix incompatible overrides in the ``numpy._typing._ufunc``... +* `#27154 `__: TYP: Use ``typing_extensions.Self`` in the ``numpy`` stubs +* `#27156 `__: MAINT: Remove any promotion-state switching logic +* `#27157 `__: TYP: add td64 overload for ``np.mean`` +* `#27158 `__: CI: Re-enable nightly OpenBLAS test runs +* `#27160 `__: DEP: Finalize ``bool(empty_array)`` deprecation +* `#27164 `__: MAINT: use npy_argparse for einsum +* `#27168 `__: DOC: add td64 example in ``np.mean`` +* `#27171 `__: TYP: Shape-typed array constructors: ``numpy.{empty,zeros,ones,full}`` +* `#27177 `__: TYP: 1-d ``numpy.arange`` return shape-type +* `#27178 `__: TYP,TST: Bump mypy to 1.11.1 +* `#27179 `__: TYP: Improved ``numpy.piecewise`` type-hints +* `#27182 `__: REV: Revert undef I and document it +* `#27184 `__: BUILD: update to OpenBLAS 0.3.28 +* `#27187 `__: MAINT: update default NPY_FEATURE_VERSION after dropping py39 +* `#27189 `__: MAINT: improve download script +* `#27202 `__: BUG: Fix NPY_RAVEL_AXIS on backwards compatible NumPy 2 builds +* `#27203 `__: DOC: update PyArray_CheckAxis doc +* `#27207 `__: TYP: Deprecate calling ``numpy.save`` with ``fix_imports`` (PEP... +* `#27208 `__: TYP: Disallow scalars and 0d-arrays in ``numpy.nonzero`` +* `#27210 `__: TYP: Semi-transparent ``numpy.shape`` shape-type annotations. +* `#27211 `__: TYP: Stop using ``Any`` as shape-type default +* `#27215 `__: MAINT: Bump github/codeql-action from 3.26.0 to 3.26.2 +* `#27218 `__: DEV: Add ``.editorconfig`` rules for Python +* `#27219 `__: TYP: Replace ``ellipsis`` with ``types.EllipsisType`` +* `#27220 `__: TYP: Fixed & improved ``TypeVar`` use for ``numpy.char.chararray`` +* `#27221 `__: MAINT: Bump actions/upload-artifact from 4.3.3 to 4.3.6 +* `#27223 `__: DOC: add docs on thread safety in NumPy +* `#27226 `__: BUG: Fix ``PyArray_ZeroContiguousBuffer`` (resize) with struct... +* `#27228 `__: DOC: Remove obsolete note from the top of the 2.0.0 release notes. +* `#27235 `__: MAINT: MSVC does not support #warning directive +* `#27237 `__: TYP: Fix several typing issues in ``numpy.polynomial`` +* `#27238 `__: DOC: update ``np.unique`` docstring +* `#27242 `__: MAINT: Update main after 2.1.0 release. 
+* `#27246 `__: MAINT: Bump github/codeql-action from 3.26.2 to 3.26.3 +* `#27247 `__: DOC: update documentation release process +* `#27249 `__: BUG: fix reference counting bug in __array_interface__ implementation +* `#27255 `__: BUG: revert unintended change in the return value of set_printoptions +* `#27261 `__: TST: Add regression test for missing descr in array-interface +* `#27262 `__: BUG: Fix #27256 and #27257 +* `#27268 `__: MAINT: Bump github/codeql-action from 3.26.3 to 3.26.4 +* `#27272 `__: ENH: make check-{docs,tutorials} fail on dtype mismatch +* `#27275 `__: BUG: Fix array_equal for numeric and non-numeric scalar types +* `#27277 `__: DOC/DEV/CI: mambaforge -> miniforge +* `#27281 `__: MAINT: Bump github/codeql-action from 3.26.4 to 3.26.5 +* `#27284 `__: BLD: cp311- macosx_arm64 wheels [wheel build] +* `#27286 `__: MAINT: Update main after the 2.0.2 release +* `#27289 `__: MAINT: Start applying ruff rules (RUF) +* `#27290 `__: MAINT: Keep applying ruff/pyupgrade rules (UP) +* `#27291 `__: DOC, MAINT: Fix new typos found by codespell +* `#27292 `__: MAINT: Start applying ruff/flake8-type-checking rules (TCH) +* `#27293 `__: MAINT: Keep applying ruff/flake8-bugbear rules (B) +* `#27294 `__: BUILD: refactor circleci to use spin [skip actions][skip azp][skip... +* `#27295 `__: MAINT: Start applying rruff/flake8-pie rules (PIE) +* `#27296 `__: MAINT: Start applying ruff/flake8-comprehensions rules (C4) +* `#27297 `__: MAINT: Apply ruff/flake8-raise rules (RSE) +* `#27298 `__: MAINT: Apply ruff/flynt rules (FLY) +* `#27302 `__: BUG: Fix bug in ``doc/neps/tools/build_index.py`` +* `#27307 `__: MAINT: Apply ruff/pycodestyle warning rules (W) +* `#27311 `__: MAINT: Bump actions/setup-python from 5.1.1 to 5.2.0 +* `#27312 `__: MAINT: Bump github/codeql-action from 3.26.5 to 3.26.6 +* `#27316 `__: BUILD: update pypy test version +* `#27320 `__: MAINT: increase max line length from 79 to 88, upgrade pycodestyle +* `#27322 `__: DOC: Removed reference to deprecated "newshape" parameter in... +* `#27323 `__: TYP: add ``ma.zeros_like`` and ``ma.ones_like`` typing +* `#27326 `__: MAINT: Bump actions/upload-artifact from 4.3.6 to 4.4.0 +* `#27330 `__: BLD: Win-arm64 cross compile workflow +* `#27331 `__: MAINT: GitHub Actions: Replace deprecated macos-12 with macos-latest +* `#27332 `__: MAINT: Update main after 2.1.1 release. +* `#27334 `__: TYP: Concrete ``float64`` and ``complex128`` scalar types with... +* `#27335 `__: ENH: Add ``allow_pickle`` flag to ``savez`` +* `#27344 `__: MAINT: fix typos +* `#27346 `__: BUG,TYP: Allow subscripting ``iinfo`` and ``finfo`` generic types... +* `#27347 `__: DOC: Mention that c is reassigned but still points to a (quickstart) +* `#27353 `__: MNT, CI: Use separate jobs for WASM wheel builds/uploads +* `#27355 `__: MAINT: Bump actions/setup-python from 5.1.1 to 5.2.0 +* `#27356 `__: MAINT: Bump actions/upload-artifact from 4.3.6 to 4.4.0 +* `#27359 `__: MAINT: fix typo in random.binomial +* `#27360 `__: BUG: fix _shrink edge case in np.ma.mask_or +* `#27361 `__: BUILD: fix missing include for std::ptrdiff_t for C++23 language... 
+* `#27363 `__: DOC: Remove reshape from appearing twice in toctree +* `#27364 `__: DOC: Update np.\*stack doc to reflect behavior +* `#27365 `__: MAINT: Bump deadsnakes/action from 3.1.0 to 3.2.0 +* `#27369 `__: DOC: fix incorrect definitions +* `#27372 `__: CI: Update cirrus nightly token +* `#27376 `__: MAINT: Fix a few typos - and sometimes improve wording +* `#27381 `__: DOC: add vecdot to 'See also' of np.dot and np.inner +* `#27384 `__: MAINT: Fix a few more typos +* `#27385 `__: DOC: Update np.unique_all example to demonstrate namedtuple output +* `#27387 `__: DOC: Clarify np.searchsorted documentation and add example for... +* `#27390 `__: MAINT: Bump github/codeql-action from 3.26.6 to 3.26.7 +* `#27391 `__: MAINT: Bump pypa/cibuildwheel from 2.20.0 to 2.21.0 +* `#27392 `__: BUG: apply critical sections around populating the dispatch cache +* `#27403 `__: DOC: Fix minor issues in arrays.promotion.rst +* `#27406 `__: BUG: Stub out ``get_build_msvc_version`` if ``distutils.msvccompiler``... +* `#27408 `__: DOC: more informative _excluded_ argument explanation in np.vectorize +* `#27412 `__: MAINT: Bump pypa/cibuildwheel from 2.21.0 to 2.21.1 +* `#27414 `__: MAINT: add Python 3.13 to classifiers +* `#27417 `__: TYP: Allow callable ``converters`` arg in ``numpy.loadtxt`` +* `#27418 `__: TYP: Fix default return dtype of ``numpy.random.Generator.integers``... +* `#27419 `__: TYP: Modernized ``numpy.dtypes`` annotations +* `#27420 `__: TYP: Optional 2nd ``numpy.complexfloating`` type parameter +* `#27421 `__: BUG: Add regression test for gh-27273 +* `#27423 `__: TYP: Add missing type arguments +* `#27424 `__: DOC: Add release notes for #27334 +* `#27425 `__: MAINT: Use correct Python interpreter in tests +* `#27426 `__: MAINT: Bump github/codeql-action from 3.26.7 to 3.26.8 +* `#27427 `__: TYP: Fixed & improved type-hinting for ``any`` and ``all`` +* `#27429 `__: BLD: pin setuptools to avoid breaking numpy.distutils +* `#27430 `__: TYP: Fix type of ``copy`` argument in ``ndarray.reshape`` +* `#27431 `__: BUG: Allow unsigned shift argument for np.roll +* `#27434 `__: ENH: make np.dtype(scalar_type) return the default dtype instance +* `#27438 `__: BUG: Disable SVE VQSort +* `#27440 `__: DOC: Add a link to the migration guide for the deprecation warning... +* `#27441 `__: DOC: remove old versionadded comments from arrays.classes.rst +* `#27442 `__: DOC: Remove old versionchanged directives from config.rst +* `#27443 `__: updated the version of mean param from the release notes (2.0.0) +* `#27444 `__: TST: Added the test case for masked array tofile failing +* `#27445 `__: DOC: removed older versionadded directives to ufuncs.rst +* `#27448 `__: DOC: Example for char.array +* `#27453 `__: DOC: Added docstring for numpy.ma.take() function. +* `#27454 `__: DOC: Remove outdated versionadded/changed directives +* `#27458 `__: MAINT: Bump github/codeql-action from 3.26.8 to 3.26.9 +* `#27464 `__: DOC: Fix a copy-paste mistake in the cumulative_sum docstring. +* `#27465 `__: DOC: update ndindex reference in np.choose docstring +* `#27466 `__: BUG: rfftn axis bug +* `#27469 `__: DOC: Added ``CONTRIBUTING.rst`` +* `#27470 `__: TYP: Add type stubs for stringdtype in np.char and np.strings +* `#27472 `__: MAINT: Check for SVE support on demand +* `#27475 `__: CI: use PyPI not scientific-python-nightly-wheels for CI doc... +* `#27478 `__: BUG: Fix extra decref of PyArray_UInt8DType. 
+* `#27482 `__: Show shape any time it cannot be inferred in repr +* `#27485 `__: MAINT: Bump github/codeql-action from 3.26.9 to 3.26.10 +* `#27486 `__: MAINT: Bump scientific-python/upload-nightly-action from 0.5.0... +* `#27490 `__: API: register NEP 35 functions as array_functions +* `#27491 `__: MAINT: Bump mamba-org/setup-micromamba from 1.9.0 to 1.10.0 +* `#27495 `__: MAINT: Bump pypa/cibuildwheel from 2.21.1 to 2.21.2 +* `#27496 `__: MAINT: Bump mamba-org/setup-micromamba from 1.10.0 to 2.0.0 +* `#27497 `__: DOC: Correct selected C docstrings to eliminate warnings +* `#27499 `__: DOC: fix missing arguments (copy and device) from asanyarray's... +* `#27502 `__: MAINT: Bump github/codeql-action from 3.26.10 to 3.26.11 +* `#27503 `__: BUG: avoid segfault on bad arguments in ndarray.__array_function__ +* `#27504 `__: ENH: Allow ``ndarray.__array_function__`` to dispatch functions... +* `#27508 `__: MAINT: Pin setuptools for testing [wheel build] +* `#27510 `__: TYP: Mark stub-only classes as ``@type_check_only`` +* `#27511 `__: TYP: Annotate type aliases without annotation +* `#27513 `__: MAINT: Update main after NumPy 2.1.2 release +* `#27517 `__: BENCH: Add benchmarks for np.non_zero +* `#27518 `__: TST: Add tests for np.nonzero with different input types +* `#27520 `__: TYP: Remove unused imports in the stubs +* `#27521 `__: TYP: Fill in the missing ``__all__`` exports +* `#27524 `__: MAINT: Bump actions/cache from 4.0.2 to 4.1.0 +* `#27525 `__: MAINT: Bump actions/upload-artifact from 4.4.0 to 4.4.1 +* `#27526 `__: MAINT: Bump github/codeql-action from 3.26.11 to 3.26.12 +* `#27532 `__: MAINT: Bump actions/cache from 4.1.0 to 4.1.1 +* `#27534 `__: BUG: Fix user dtype can-cast with python scalar during promotion +* `#27535 `__: MAINT: Bump pypa/cibuildwheel from 2.21.2 to 2.21.3 +* `#27536 `__: MAINT: Bump actions/upload-artifact from 4.4.1 to 4.4.3 +* `#27549 `__: BUG: weighted quantile for some zero weights +* `#27550 `__: BLD: update vendored Meson to 1.5.2 +* `#27551 `__: MAINT: Bump github/codeql-action from 3.26.12 to 3.26.13 +* `#27553 `__: BLD: rename ``meson_options.txt`` to ``meson.options`` +* `#27555 `__: DEV: bump ``python`` to 3.12 in environment.yml +* `#27556 `__: DOC: Clarify use of standard deviation in mtrand.pyx +* `#27557 `__: BUG: Fix warning "differs in levels of indirection" in npy_atomic.h... +* `#27558 `__: MAINT: distutils: remove obsolete search for ``ecc`` executable +* `#27560 `__: CI: start building Windows free-threaded wheels +* `#27564 `__: BUILD: satisfy gcc-13 pendantic errors +* `#27567 `__: BUG: handle possible error for PyTraceMallocTrack +* `#27568 `__: BUILD: vendor tempita from Cython +* `#27579 `__: BUG: Adjust numpy.i for SWIG 4.3 compatibility +* `#27586 `__: MAINT: Update Highway to latest +* `#27587 `__: BLD: treat SVML object files better to avoid compiler warnings +* `#27595 `__: DOC: Clarify obj parameter types in numpy.delete documentation +* `#27598 `__: DOC: add examples to ctypeslib +* `#27602 `__: Update documentation for floating-point precision and determinant... +* `#27604 `__: DOC: Fix rendering in docstring of nan_to_num +* `#27612 `__: ENH: Add comments to ``string_fastsearch.h`` , rename some C-methods +* `#27613 `__: BUG: Fix Linux QEMU CI workflow +* `#27615 `__: ENH: Fix np.insert to handle boolean arrays as masks +* `#27617 `__: DOC: Update the RELEASE_WALKTHROUGH.rst file. 
+* `#27619 `__: MAINT: Bump actions/cache from 4.1.1 to 4.1.2 +* `#27620 `__: MAINT: Bump actions/dependency-review-action from 4.3.4 to 4.3.5 +* `#27621 `__: MAINT: Bump github/codeql-action from 3.26.13 to 3.27.0 +* `#27627 `__: ENH: Re-enable VSX from build targets for sin/cos +* `#27630 `__: ENH: Extern memory management to Cython +* `#27634 `__: MAINT: Bump actions/setup-python from 5.2.0 to 5.3.0 +* `#27636 `__: BUG: fixes for StringDType/unicode promoters +* `#27643 `__: BUG : avoid maximum fill value of datetime and timedelta return... +* `#27644 `__: DOC: Remove ambiguity in docs for ndarray.byteswap() +* `#27650 `__: BLD: Do not set __STDC_VERSION__ to zero during build +* `#27652 `__: TYP,TST: Bump ``mypy`` from ``1.11.1`` to ``1.13.0`` +* `#27653 `__: TYP: Fix Array API method signatures +* `#27659 `__: TYP: Transparent ``ndarray`` unary operator method signatures +* `#27661 `__: BUG: np.cov transpose control +* `#27663 `__: MAINT: fix wasm32 runtime type error in numpy._core +* `#27664 `__: MAINT: Bump actions/dependency-review-action from 4.3.5 to 4.4.0 +* `#27665 `__: ENH: Re-enable VXE from build targets for sin/cos +* `#27666 `__: BUG: Fix a reference count leak in npy_find_descr_for_scalar. +* `#27667 `__: TYP: Allow returning non-array-likes from the ``apply_along_axis``... +* `#27676 `__: CI: Attempt to fix CI on 32 bit linux +* `#27678 `__: DOC: fix incorrect versionadded for np.std +* `#27680 `__: MAINT: fix typo / copy paste error +* `#27681 `__: TYP: Fix some inconsistencies in the scalar methods and properties +* `#27683 `__: TYP: Improve ``np.sum`` and ``np.mean`` return types with given... +* `#27684 `__: DOC: fix spelling of "reality" in ``_nanfunctions_impl.pyi`` +* `#27685 `__: MAINT: Drop useless shebang +* `#27691 `__: TYP: Use ``_typeshed`` to clean up the stubs +* `#27693 `__: MAINT: Update main after 2.1.3 release. +* `#27695 `__: BUG: Fix multiple modules in F2PY and COMMON handling +* `#27702 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.0.4 to 3.1.0 +* `#27705 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.0 to 2.0.1 +* `#27706 `__: DOC: Remove empty notes +* `#27707 `__: CI: Set up free-threaded CI using quansight-labs/setup-python +* `#27708 `__: DOC: Remove version notes +* `#27714 `__: DOC: fix a mistake in the docstring of vector_norm +* `#27715 `__: BUG: fix incorrect output descriptor in fancy indexing +* `#27716 `__: ENH: Make ``__module__`` attribute coherent across API +* `#27721 `__: DOC: fix name of shape parameter kappa of von Mises distribution +* `#27723 `__: BUG: Allow empty memmaps in most situations +* `#27724 `__: MAINT: Bump github/codeql-action from 3.27.0 to 3.27.1 +* `#27728 `__: BUG: Handle ``--lower`` for F2PY directives and callbacks +* `#27729 `__: BUG: f2py: fix issues with thread-local storage define +* `#27730 `__: TST: Add an F2PY check for exposing variables without functions +* `#27731 `__: BUG: Fix ``fortranname`` for functions +* `#27734 `__: Fix documentation for the chi-square distribution +* `#27735 `__: ENH: Add a ``__dict__`` to ufunc objects and allow overriding... 
+* `#27736 `__: TYP: Optional ``numpy.number`` type parameters +* `#27742 `__: MAINT: Bump github/codeql-action from 3.27.1 to 3.27.2 +* `#27743 `__: DOC: Fix typos in subclassing documentation +* `#27746 `__: DOC: Added additional guidance for compiling in Windows +* `#27750 `__: TYP: Fix ``ndarray.item()`` and improve ``ndarray.tolist()`` +* `#27753 `__: TYP: Fix the annotations of ``ndarray.real`` and ``ndarray.imag`` +* `#27754 `__: MAINT: Bump github/codeql-action from 3.27.2 to 3.27.3 +* `#27755 `__: TYP: Annotate ``__setitem__`` , ``__contains__`` and ``__iter__``... +* `#27756 `__: TYP: 1-d shape-typing for ``ndarray.flatten`` and ``ravel`` +* `#27757 `__: TYP: Remove the non-existent ``bitwise_count`` methods of ``ndarray``... +* `#27758 `__: TYP: Remove ``ndarray`` binop overloads for ``NDArray[Never]`` +* `#27763 `__: DOC: Note that allow-pickle is not safe also in error +* `#27765 `__: TYP: Shape-typed ``ndarray`` inplace binary operator methods. +* `#27766 `__: MAINT: Bump github/codeql-action from 3.27.3 to 3.27.4 +* `#27767 `__: TYP: Support shape-typing in ``reshape`` and ``resize`` +* `#27769 `__: TYP: Towards a less messy ``__init__.pyi`` +* `#27770 `__: TYP: Fix incorrect baseclass of ``linalg.LinAlgError`` +* `#27771 `__: ENH: ``default_rng`` coerces ``RandomState`` to ``Generator`` +* `#27773 `__: BUG: Fix repeat, accumulate for strings and accumulate API logic +* `#27775 `__: TYP: Fix undefined type-parameter name +* `#27776 `__: TYP: Fix method overload issues in ``ndarray`` and ``generic`` +* `#27778 `__: TYP: Generic ``numpy.generic`` type parameter for the ``item()``... +* `#27779 `__: TYP: Type hints for ``numpy.__config__`` +* `#27788 `__: DOC: Make wording in absolute beginners guide more beginner friendly +* `#27790 `__: TYP: Generic ``timedelta64`` and ``datetime64`` scalar types +* `#27792 `__: TYP: Generic ``numpy.bool`` and statically typed boolean logic +* `#27794 `__: MAINT: Upgrade to spin 0.13 +* `#27795 `__: update pythoncapi-compat to latest HEAD +* `#27800 `__: BUG: Ensure context path is taken in masked array array-wrap +* `#27802 `__: BUG: Ensure that same-kind casting works for uints (mostly) +* `#27803 `__: MAINT: Bump github/codeql-action from 3.27.4 to 3.27.5 +* `#27806 `__: DOC: Improve choice() documentation about return types +* `#27807 `__: BUG,ENH: Fix internal ``__array_wrap__`` for direct calls +* `#27808 `__: ENH: Ensure hugepages are also indicated for calloc allocations +* `#27809 `__: BUG: Fix array flags propagation in boolean indexing +* `#27810 `__: MAINT: Bump actions/dependency-review-action from 4.4.0 to 4.5.0 +* `#27812 `__: BUG: ``timedelta64.__[r]divmod__`` segfaults for incompatible... +* `#27813 `__: DOC: fix broken reference in arrays.classes.rst +* `#27815 `__: DOC: Add a release fragment for gh-14622 +* `#27816 `__: MAINT: Fixup that spin can be installed via conda too now +* `#27817 `__: DEV: changelog: make title processing more robust +* `#27828 `__: CI: skip ninja installation in linux_qemu workflows +* `#27829 `__: CI: update circleci to python3.11.10, limit parallel builds.... +* `#27831 `__: BUG: Fix mismatch in definition and declaration for a couple... +* `#27843 `__: DOC: Correct version-added for mean arg for nanvar and nanstd +* `#27845 `__: BUG: Never negate strides in reductions (for now) +* `#27846 `__: ENH: add matvec and vecmat gufuncs +* `#27852 `__: DOC: Correct versionadded for vecmat and matvec. 
+* `#27853 `__: REL: Prepare for the NumPy 2.2.0rc1 release [wheel build] +* `#27874 `__: BUG: fix importing numpy in Python's optimized mode (#27868) +* `#27895 `__: DOC: Fix double import in docs (#27878) +* `#27904 `__: MAINT: Ensure correct handling for very large unicode strings +* `#27906 `__: MAINT: Use mask_store instead of store for compiler workaround +* `#27908 `__: MAINT: Update highway from main. +* `#27911 `__: ENH: update __module__ in numpy.random module +* `#27912 `__: ENH: Refactor ``__qualname__`` across API +* `#27913 `__: PERF: improve multithreaded ufunc scaling +* `#27916 `__: MAINT: Bump actions/cache from 4.1.2 to 4.2.0 + diff --git a/doc/changelog/2.2.1-changelog.rst b/doc/changelog/2.2.1-changelog.rst new file mode 100644 index 000000000000..ba3c4f19eb3f --- /dev/null +++ b/doc/changelog/2.2.1-changelog.rst @@ -0,0 +1,34 @@ + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Joren Hammudoglu +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Simon Altrogge +* Thomas A Caswell +* Warren Weckesser +* Yang Wang + + +Pull requests merged +==================== + +A total of 12 pull requests were merged for this release. + +* `#27935 `__: MAINT: Prepare 2.2.x for further development +* `#27950 `__: TEST: cleanups [skip cirrus][skip azp] +* `#27958 `__: BUG: fix use-after-free error in npy_hashtable.cpp (#27955) +* `#27959 `__: BLD: add missing include +* `#27982 `__: BUG:fix compile error libatomic link test to meson.build +* `#27990 `__: TYP: Fix falsely rejected value types in ``ndarray.__setitem__`` +* `#27991 `__: MAINT: Don't wrap ``#include `` with ``extern "C"`` +* `#27993 `__: BUG: Fix segfault in stringdtype lexsort +* `#28006 `__: MAINT: random: Tweak module code in mtrand.pyx to fix a Cython... +* `#28007 `__: BUG: Cython API was missing NPY_UINTP. +* `#28021 `__: CI: pin scipy-doctest to 1.5.1 +* `#28044 `__: TYP: allow ``None`` in operand sequence of nditer diff --git a/doc/changelog/2.2.2-changelog.rst b/doc/changelog/2.2.2-changelog.rst new file mode 100644 index 000000000000..ac856c97174c --- /dev/null +++ b/doc/changelog/2.2.2-changelog.rst @@ -0,0 +1,37 @@ + +Contributors +============ + +A total of 8 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Alicia Boya García + +* Charles Harris +* Joren Hammudoglu +* Kai Germaschewski + +* Nathan Goldbaum +* PTUsumit + +* Rohit Goswami +* Sebastian Berg + +Pull requests merged +==================== + +A total of 16 pull requests were merged for this release. + +* `#28050 `__: MAINT: Prepare 2.2.x for further development +* `#28055 `__: TYP: fix ``void`` arrays not accepting ``str`` keys in ``__setitem__`` +* `#28066 `__: TYP: fix unnecessarily broad ``integer`` binop return types (#28065) +* `#28112 `__: TYP: Better ``ndarray`` binop return types for ``float64`` &... 
+* `#28113 `__: TYP: Return the correct ``bool`` from ``issubdtype`` +* `#28114 `__: TYP: Always accept ``date[time]`` in the ``datetime64`` constructor +* `#28120 `__: BUG: Fix auxdata initialization in ufunc slow path +* `#28131 `__: BUG: move reduction initialization to ufunc initialization +* `#28132 `__: TYP: Fix ``interp`` to accept and return scalars +* `#28137 `__: BUG: call PyType_Ready in f2py to avoid data races +* `#28145 `__: BUG: remove unnecessary call to PyArray_UpdateFlags +* `#28160 `__: BUG: Avoid data race in PyArray_CheckFromAny_int +* `#28175 `__: BUG: Fix f2py directives and --lower casing +* `#28176 `__: TYP: Fix overlapping overloads issue in 2->1 ufuncs +* `#28177 `__: TYP: preserve shape-type in ndarray.astype() +* `#28178 `__: TYP: Fix missing and spurious top-level exports diff --git a/doc/changelog/2.2.3-changelog.rst b/doc/changelog/2.2.3-changelog.rst new file mode 100644 index 000000000000..2cb6e99eec51 --- /dev/null +++ b/doc/changelog/2.2.3-changelog.rst @@ -0,0 +1,43 @@ + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !amotzop +* Charles Harris +* Chris Sidebottom +* Joren Hammudoglu +* Matthew Brett +* Nathan Goldbaum +* Raghuveer Devulapalli +* Sebastian Berg +* Yakov Danishevsky + + +Pull requests merged +==================== + +A total of 21 pull requests were merged for this release. + +* `#28185 `__: MAINT: Prepare 2.2.x for further development +* `#28201 `__: BUG: fix data race in a more minimal way on stable branch +* `#28208 `__: BUG: Fix ``from_float_positional`` errors for huge pads +* `#28209 `__: BUG: fix data race in np.repeat +* `#28212 `__: MAINT: Use VQSORT_COMPILER_COMPATIBLE to determine if we should... +* `#28224 `__: MAINT: update highway to latest +* `#28236 `__: BUG: Add cpp atomic support (#28234) +* `#28237 `__: BLD: Compile fix for clang-cl on WoA +* `#28243 `__: TYP: Avoid upcasting ``float64`` in the set-ops +* `#28249 `__: BLD: better fix for clang / ARM compiles +* `#28266 `__: TYP: Fix ``timedelta64.__divmod__`` and ``timedelta64.__mod__``... +* `#28274 `__: TYP: Fixed missing typing information of set_printoptions +* `#28278 `__: BUG: backport resource cleanup bugfix from gh-28273 +* `#28282 `__: BUG: fix incorrect bytes to stringdtype coercion +* `#28283 `__: TYP: Fix scalar constructors +* `#28284 `__: TYP: stub ``numpy.matlib`` +* `#28285 `__: TYP: stub the missing ``numpy.testing`` modules +* `#28286 `__: CI: Fix the github label for ``TYP:`` PR's and issues +* `#28305 `__: TYP: Backport typing updates from main +* `#28321 `__: BUG: fix race initializing legacy dtype casts +* `#28324 `__: CI: update test_moderately_small_alpha diff --git a/doc/neps/conf.py b/doc/neps/conf.py index ea8b5755d340..8331dc94c1c7 100644 --- a/doc/neps/conf.py +++ b/doc/neps/conf.py @@ -15,7 +15,6 @@ # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # -import os from datetime import datetime # import sys # sys.path.insert(0, os.path.abspath('.')) diff --git a/doc/neps/nep-0021-advanced-indexing.rst b/doc/neps/nep-0021-advanced-indexing.rst index 849ed874c21b..7392b25f2765 100644 --- a/doc/neps/nep-0021-advanced-indexing.rst +++ b/doc/neps/nep-0021-advanced-indexing.rst @@ -649,7 +649,7 @@ eventualities. Copyright --------- -This document is placed under the CC0 1.0 Universell (CC0 1.0) Public Domain Dedication [1]_. 
+This document is placed under the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication [1]_. References and footnotes diff --git a/doc/neps/nep-0048-spending-project-funds.rst b/doc/neps/nep-0048-spending-project-funds.rst index f2071587ce28..8e58d1a3ba04 100644 --- a/doc/neps/nep-0048-spending-project-funds.rst +++ b/doc/neps/nep-0048-spending-project-funds.rst @@ -125,7 +125,7 @@ a volunteer in a reasonable amount of time. There are also many tasks, activities, and projects outside of development work that are important and could enhance the project or community - think of, for example, user surveys, translations, outreach, dedicated -mentoring of newcomers, community organizating, website improvements, and +mentoring of newcomers, community organizing, website improvements, and administrative tasks. Time of people to perform tasks is also not the only thing that funds can be diff --git a/doc/neps/nep-0050-scalar-promotion.rst b/doc/neps/nep-0050-scalar-promotion.rst index fc161ef9629f..aa04dd2c740e 100644 --- a/doc/neps/nep-0050-scalar-promotion.rst +++ b/doc/neps/nep-0050-scalar-promotion.rst @@ -4,7 +4,7 @@ NEP 50 — Promotion rules for Python scalars =========================================== :Author: Sebastian Berg -:Status: Draft +:Status: Final :Type: Standards Track :Created: 2021-05-25 @@ -214,7 +214,7 @@ arrays that are not 0-D, such as ``array([2])``. - ``int64(301)`` - *Exception* [T5]_ * - ``uint8(100) + 200`` - - ``int64(301)`` + - ``int64(300)`` - ``uint8(44)`` *and* ``RuntimeWarning`` [T6]_ * - ``float32(1) + 3e100`` - ``float64(3e100)`` diff --git a/doc/neps/nep-0053-c-abi-evolution.rst b/doc/neps/nep-0053-c-abi-evolution.rst index 6abdb1d854cf..16744dc0fde3 100644 --- a/doc/neps/nep-0053-c-abi-evolution.rst +++ b/doc/neps/nep-0053-c-abi-evolution.rst @@ -253,7 +253,7 @@ a user to: yet compatible. The import of ``numpy2_compat`` (and an error when it is missing) will be -inserted by the NumPy eaders as part of the ``import_array()`` call. +inserted by the NumPy headers as part of the ``import_array()`` call. Alternatives ============ diff --git a/doc/neps/tools/build_index.py b/doc/neps/tools/build_index.py index e8ca86e68c13..c00dd7ba36f8 100644 --- a/doc/neps/tools/build_index.py +++ b/doc/neps/tools/build_index.py @@ -19,7 +19,7 @@ def render(tpl_path, context): def nep_metadata(): ignore = ('nep-template.rst') sources = sorted(glob.glob(r'nep-*.rst')) - sources = [s for s in sources if not s in ignore] + sources = [s for s in sources if s not in ignore] meta_re = r':([a-zA-Z\-]*): (.*)' @@ -55,7 +55,7 @@ def nep_metadata(): f' {tags["Title"]!r}') if tags['Status'] in ('Accepted', 'Rejected', 'Withdrawn'): - if not 'Resolution' in tags: + if 'Resolution' not in tags: raise RuntimeError( f'NEP {nr} is Accepted/Rejected/Withdrawn but ' 'has no Resolution tag' @@ -70,7 +70,7 @@ def nep_metadata(): for nr, tags in neps.items(): if tags['Status'] == 'Superseded': - if not 'Replaced-By' in tags: + if 'Replaced-By' not in tags: raise RuntimeError( f'NEP {nr} has been Superseded, but has no Replaced-By tag' ) @@ -78,7 +78,7 @@ def nep_metadata(): replaced_by = int(re.findall(r'\d+', tags['Replaced-By'])[0]) replacement_nep = neps[replaced_by] - if not 'Replaces' in replacement_nep: + if 'Replaces' not in replacement_nep: raise RuntimeError( f'NEP {nr} is superseded by {replaced_by}, but that NEP has ' f"no Replaces tag." 
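The corrected NEP 50 rows above can be checked directly. A minimal sketch, assuming
NumPy >= 2.0 (where the new promotion rules are the default; the exact warning text
may vary)::

    import numpy as np
    import warnings

    # A Python int no longer upcasts a NumPy scalar: uint8 wins,
    # the value wraps around, and NumPy emits a RuntimeWarning.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        res = np.uint8(100) + 200
    print(res, res.dtype)    # 44 uint8  (300 % 256 == 44)
    print(len(caught) > 0)   # True: an overflow RuntimeWarning was recorded

    # float32 is likewise preserved; 3e100 overflows to float32 inf.
    print(np.float32(1) + 3e100)   # inf, still float32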
diff --git a/doc/postprocess.py b/doc/postprocess.py index 4b48fa443149..a7361cb75ebb 100755 --- a/doc/postprocess.py +++ b/doc/postprocess.py @@ -34,13 +34,13 @@ def process_tex(lines): """ new_lines = [] for line in lines: - if (line.startswith(r'\section{numpy.') - or line.startswith(r'\subsection{numpy.') - or line.startswith(r'\subsubsection{numpy.') - or line.startswith(r'\paragraph{numpy.') - or line.startswith(r'\subparagraph{numpy.') - ): - pass # skip! + if line.startswith(("\\section{numpy.", + "\\subsection{numpy.", + "\\subsubsection{numpy.", + "\\paragraph{numpy.", + "\\subparagraph{numpy.", + )): + pass else: new_lines.append(line) return new_lines diff --git a/doc/preprocess.py b/doc/preprocess.py index 83980bb2fed5..b8f49fbb2c9c 100755 --- a/doc/preprocess.py +++ b/doc/preprocess.py @@ -1,7 +1,5 @@ #!/usr/bin/env python3 -import subprocess import os -import sys from string import Template def main(): diff --git a/doc/release/upcoming_changes/12150.improvement.rst b/doc/release/upcoming_changes/12150.improvement.rst deleted file mode 100644 index f73a6d2aaa28..000000000000 --- a/doc/release/upcoming_changes/12150.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -``histogram`` auto-binning now returns bin sizes >=1 for integer input data ---------------------------------------------------------------------------- -For integer input data, bin sizes smaller than 1 result in spurious empty -bins. This is now avoided when the number of bins is computed using one of the -algorithms provided by `histogram_bin_edges`. diff --git a/doc/release/upcoming_changes/26081.improvement.rst b/doc/release/upcoming_changes/26081.improvement.rst deleted file mode 100644 index bac5c197caa0..000000000000 --- a/doc/release/upcoming_changes/26081.improvement.rst +++ /dev/null @@ -1,11 +0,0 @@ -``ndarray`` shape-type parameter is now covariant and bound to ``tuple[int, ...]`` ----------------------------------------------------------------------------------- -Static typing for ``ndarray`` is a long-term effort that continues -with this change. It is a generic type with type parameters for -the shape and the data type. Previously, the shape type parameter could be -any value. This change restricts it to a tuple of ints, as one would expect -from using ``ndarray.shape``. Further, the shape-type parameter has been -changed from invariant to covariant. This change also applies to the subtypes -of ``ndarray``, e.g. ``numpy.ma.MaskedArray``. See the -`typing docs `_ -for more information. \ No newline at end of file diff --git a/doc/release/upcoming_changes/26103.c_api.rst b/doc/release/upcoming_changes/26103.c_api.rst deleted file mode 100644 index 9d0d998e2dfc..000000000000 --- a/doc/release/upcoming_changes/26103.c_api.rst +++ /dev/null @@ -1,15 +0,0 @@ -API symbols now hidden but customizable ---------------------------------------- -NumPy now defaults to hide the API symbols it adds to allow all NumPy API -usage. -This means that by default you cannot dynamically fetch the NumPy API from -another library (this was never possible on windows). - -If you are experiencing linking errors related to ``PyArray_API`` or -``PyArray_RUNTIME_VERSION``, you can define the -:c:macro:`NPY_API_SYMBOL_ATTRIBUTE` to opt-out of this change. - -If you are experiencing problems due to an upstream header including NumPy, -the solution is to make sure you ``#include "numpy/ndarrayobject.h"`` before -their header and import NumPy yourself based on :ref:`including-the-c-api`. 
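The deleted ``histogram`` fragment above describes behaviour that shipped with the
release this diff consolidates; a small sketch of what it means in practice (the
sample data here is made up)::

    import numpy as np

    rng = np.random.default_rng(0)
    data = rng.integers(0, 3, size=1000)   # many integer samples, narrow range
    edges = np.histogram_bin_edges(data, bins="auto")
    # With the change, automatic binning of integer input no longer produces
    # bin widths below 1, so no spurious empty bins appear.
    print(np.diff(edges).min() >= 1)       # expected: True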
- diff --git a/doc/release/upcoming_changes/26268.expired.rst b/doc/release/upcoming_changes/26268.expired.rst deleted file mode 100644 index 932fdbfae6d7..000000000000 --- a/doc/release/upcoming_changes/26268.expired.rst +++ /dev/null @@ -1 +0,0 @@ -* Scalars and 0D arrays are disallowed for `numpy.nonzero` and `numpy.ndarray.nonzero`. diff --git a/doc/release/upcoming_changes/26285.change.rst b/doc/release/upcoming_changes/26285.change.rst deleted file mode 100644 index d652c58dc799..000000000000 --- a/doc/release/upcoming_changes/26285.change.rst +++ /dev/null @@ -1,13 +0,0 @@ -``ma.corrcoef`` may return a slightly different result ------------------------------------------------------- -A pairwise observation approach is currently used in `ma.corrcoef` to -calculate the standard deviations for each pair of variables. This has been -changed as it is being used to normalise the covariance, estimated using -`ma.cov`, which does not consider the observations for each variable in a -pairwise manner, rendering it unnecessary. The normalisation has been -replaced by the more appropriate standard deviation for each variable, -which significantly reduces the wall time, but will return slightly different -estimates of the correlation coefficients in cases where the observations -between a pair of variables are not aligned. However, it will return the same -estimates in all other cases, including returning the same correlation matrix -as `corrcoef` when using a masked array with no masked values. \ No newline at end of file diff --git a/doc/release/upcoming_changes/26285.performance.rst b/doc/release/upcoming_changes/26285.performance.rst deleted file mode 100644 index 79009f662a0f..000000000000 --- a/doc/release/upcoming_changes/26285.performance.rst +++ /dev/null @@ -1,5 +0,0 @@ -``ma.cov`` and ``ma.corrcoef`` are now significantly faster ------------------------------------------------------------ -The private function has been refactored along with `ma.cov` and -`ma.corrcoef`. They are now significantly faster, particularly on large, -masked arrays. \ No newline at end of file diff --git a/doc/release/upcoming_changes/26292.new_feature.rst b/doc/release/upcoming_changes/26292.new_feature.rst deleted file mode 100644 index fc2c33571d77..000000000000 --- a/doc/release/upcoming_changes/26292.new_feature.rst +++ /dev/null @@ -1 +0,0 @@ -* `numpy.reshape` and `numpy.ndarray.reshape` now support ``shape`` and ``copy`` arguments. diff --git a/doc/release/upcoming_changes/26313.change.rst b/doc/release/upcoming_changes/26313.change.rst deleted file mode 100644 index 99c8b1d879f9..000000000000 --- a/doc/release/upcoming_changes/26313.change.rst +++ /dev/null @@ -1,2 +0,0 @@ -* As `numpy.vecdot` is now a ufunc it has a less precise signature. - This is due to the limitations of ufunc's typing stub. diff --git a/doc/release/upcoming_changes/26388.performance.rst b/doc/release/upcoming_changes/26388.performance.rst deleted file mode 100644 index 2e99f9452c1e..000000000000 --- a/doc/release/upcoming_changes/26388.performance.rst +++ /dev/null @@ -1,3 +0,0 @@ -* `numpy.save` now uses pickle protocol version 4 for saving arrays with - object dtype, which allows for pickle objects larger than 4GB and improves - saving speed by about 5% for large arrays. 
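The ``shape`` and ``copy`` arguments from the ``numpy.reshape`` fragment above work
as follows — a minimal sketch, assuming NumPy >= 2.1::

    import numpy as np

    a = np.arange(6)
    v = np.reshape(a, shape=(2, 3), copy=False)  # copy=False: view, or error if impossible
    c = a.reshape((3, 2), copy=True)             # copy=True: always copies the data
    print(v.base is a)             # True  -- v is a view of a
    print(np.shares_memory(a, c))  # False -- c is independent of a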
diff --git a/doc/release/upcoming_changes/26452.deprecation.rst b/doc/release/upcoming_changes/26452.deprecation.rst deleted file mode 100644 index cc4a10bfafee..000000000000 --- a/doc/release/upcoming_changes/26452.deprecation.rst +++ /dev/null @@ -1,4 +0,0 @@ -* The `fix_imports` keyword argument in `numpy.save` is deprecated. Since - NumPy 1.17, `numpy.save` uses a pickle protocol that no longer supports - Python 2, and ignored `fix_imports` keyword. This keyword is kept only - for backward compatibility. It is now deprecated. diff --git a/doc/release/upcoming_changes/26501.new_feature.rst b/doc/release/upcoming_changes/26501.new_feature.rst deleted file mode 100644 index c7465925295c..000000000000 --- a/doc/release/upcoming_changes/26501.new_feature.rst +++ /dev/null @@ -1,2 +0,0 @@ -* NumPy now supports DLPack v1, support for older versions will - be deprecated in the future. diff --git a/doc/release/upcoming_changes/26579.new_function.rst b/doc/release/upcoming_changes/26579.new_function.rst deleted file mode 100644 index 168d12189323..000000000000 --- a/doc/release/upcoming_changes/26579.new_function.rst +++ /dev/null @@ -1,6 +0,0 @@ -New function `numpy.unstack` ----------------------------- - -A new function ``np.unstack(array, axis=...)`` was added, which splits -an array into a tuple of arrays along an axis. It serves as the inverse -of `numpy.stack`. diff --git a/doc/release/upcoming_changes/26580.new_feature.rst b/doc/release/upcoming_changes/26580.new_feature.rst deleted file mode 100644 index c625e9b9d8a2..000000000000 --- a/doc/release/upcoming_changes/26580.new_feature.rst +++ /dev/null @@ -1 +0,0 @@ -* `numpy.asanyarray` now supports ``copy`` and ``device`` arguments, matching `numpy.asarray`. diff --git a/doc/release/upcoming_changes/26611.expired.rst b/doc/release/upcoming_changes/26611.expired.rst deleted file mode 100644 index 1df220d2b2a7..000000000000 --- a/doc/release/upcoming_changes/26611.expired.rst +++ /dev/null @@ -1,2 +0,0 @@ -* ``set_string_function`` internal function was removed and ``PyArray_SetStringFunction`` - was stubbed out. diff --git a/doc/release/upcoming_changes/26611.new_feature.rst b/doc/release/upcoming_changes/26611.new_feature.rst deleted file mode 100644 index 6178049cf4ed..000000000000 --- a/doc/release/upcoming_changes/26611.new_feature.rst +++ /dev/null @@ -1,2 +0,0 @@ -* `numpy.printoptions`, `numpy.get_printoptions`, and `numpy.set_printoptions` now support - a new option, ``override_repr``, for defining custom ``repr(array)`` behavior. diff --git a/doc/release/upcoming_changes/26656.improvement.rst b/doc/release/upcoming_changes/26656.improvement.rst deleted file mode 100644 index 66d7508d2738..000000000000 --- a/doc/release/upcoming_changes/26656.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -`np.quantile` with method ``closest_observation`` chooses nearest even order statistic --------------------------------------------------------------------------------------- -This changes the definition of nearest for border cases from the nearest odd -order statistic to nearest even order statistic. The numpy implementation now -matches other reference implementations. 
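As a small illustration of the ``closest_observation`` note above (input values are illustrative), the method always returns one of the observations rather than an interpolated value::

    import numpy as np

    a = np.array([1.0, 2.0, 3.0, 4.0])
    # "closest_observation" picks an existing order statistic; per the
    # note above, border cases now resolve to the nearest *even* order
    # statistic, matching other reference implementations.
    print(np.quantile(a, 0.5, method="closest_observation"))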
diff --git a/doc/release/upcoming_changes/26724.new_feature.rst b/doc/release/upcoming_changes/26724.new_feature.rst deleted file mode 100644 index 3c6a830728a4..000000000000 --- a/doc/release/upcoming_changes/26724.new_feature.rst +++ /dev/null @@ -1,7 +0,0 @@ -* `numpy.cumulative_sum` and `numpy.cumulative_prod` were added as Array API - compatible alternatives for `numpy.cumsum` and `numpy.cumprod`. The new functions - can include a fixed initial (zeros for ``sum`` and ones for ``prod``) in the result. -* `numpy.clip` now supports ``max`` and ``min`` keyword arguments which are meant - to replace ``a_min`` and ``a_max``. Also, for ``np.clip(a)`` or ``np.clip(a, None, None)`` - a copy of the input array will be returned instead of raising an error. -* `numpy.astype` now supports ``device`` argument. diff --git a/doc/release/upcoming_changes/26750.improvement.rst b/doc/release/upcoming_changes/26750.improvement.rst deleted file mode 100644 index 858061dbe48a..000000000000 --- a/doc/release/upcoming_changes/26750.improvement.rst +++ /dev/null @@ -1,12 +0,0 @@ -`lapack_lite` is now thread safe --------------------------------- - -NumPy provides a minimal low-performance version of LAPACK named ``lapack_lite`` -that can be used if no BLAS/LAPACK system is detected at build time. - -Until now, ``lapack_lite`` was not thread safe. Single-threaded use cases did -not hit any issues, but running linear algebra operations in multiple threads -could lead to errors, incorrect results, or seg faults due to data races. - -We have added a global lock, serializing access to ``lapack_lite`` in multiple -threads. diff --git a/doc/release/upcoming_changes/26766.change.rst b/doc/release/upcoming_changes/26766.change.rst deleted file mode 100644 index 923dbe816dd1..000000000000 --- a/doc/release/upcoming_changes/26766.change.rst +++ /dev/null @@ -1,2 +0,0 @@ -* `numpy.floor`, `numpy.ceil`, and `numpy.trunc` now won't perform casting - to a floating dtype for integer and boolean dtype input arrays. diff --git a/doc/release/upcoming_changes/26842.c_api.rst b/doc/release/upcoming_changes/26842.c_api.rst deleted file mode 100644 index 7e50dd385006..000000000000 --- a/doc/release/upcoming_changes/26842.c_api.rst +++ /dev/null @@ -1,5 +0,0 @@ -Many shims removed from npy_3kcompat.h --------------------------------------- -Many of the old shims and helper functions were removed from -``npy_3kcompat.h``. If you find yourself in need of these, vendor the previous -version of the file into your codebase. diff --git a/doc/release/upcoming_changes/26846.improvement.rst b/doc/release/upcoming_changes/26846.improvement.rst deleted file mode 100644 index ae9b72d195bf..000000000000 --- a/doc/release/upcoming_changes/26846.improvement.rst +++ /dev/null @@ -1,6 +0,0 @@ -The `numpy.printoptions` context manager is now thread and async-safe ---------------------------------------------------------------------- - -In prior versions of NumPy, the printoptions were defined using a combination -of Python and C global variables. We have refactored so the state is stored in -a python ``ContextVar``, making the context manager thread and async-safe. 
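The ``numpy.printoptions`` note above can be illustrated with a short sketch (the thread bodies are illustrative): because the state now lives in a ``ContextVar``, each thread sees only its own options::

    import threading
    import numpy as np

    arr = np.array([1.23456789])

    def show(precision):
        # The context manager only affects the current thread/async task,
        # so concurrent workers no longer clobber each other's options.
        with np.printoptions(precision=precision):
            print(threading.current_thread().name, arr)

    threads = [threading.Thread(target=show, args=(p,)) for p in (2, 6)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()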
diff --git a/doc/release/upcoming_changes/26908.c_api.rst b/doc/release/upcoming_changes/26908.c_api.rst deleted file mode 100644 index d6e43591819d..000000000000 --- a/doc/release/upcoming_changes/26908.c_api.rst +++ /dev/null @@ -1,8 +0,0 @@ -New ``PyUFuncObject`` field ``process_core_dims_func`` ------------------------------------------------------- -The field ``process_core_dims_func`` was added to the structure -``PyUFuncObject``. For generalized ufuncs, this field can be set to a -function of type ``PyUFunc_ProcessCoreDimsFunc`` that will be called when the -ufunc is called. It allows the ufunc author to check that core dimensions -satisfy additional constraints, and to set output core dimension sizes if they -have not been provided. diff --git a/doc/release/upcoming_changes/26981.new_feature.rst b/doc/release/upcoming_changes/26981.new_feature.rst deleted file mode 100644 index f466faeb7590..000000000000 --- a/doc/release/upcoming_changes/26981.new_feature.rst +++ /dev/null @@ -1,9 +0,0 @@ -``f2py`` can generate freethreading-compatible C extensions ------------------------------------------------------------ - -Pass ``--freethreading-compatible`` to the f2py CLI tool to produce a C -extension marked as compatible with the free threading CPython -interpreter. Doing so prevents the interpreter from re-enabling the GIL at -runtime when it imports the C extension. Note that ``f2py`` does not analyze -fortran code for thread safety, so you must verify that the wrapped fortran -code is thread safe before marking the extension as compatible. diff --git a/doc/release/upcoming_changes/27076.deprecation.rst b/doc/release/upcoming_changes/27076.deprecation.rst deleted file mode 100644 index f692b814c17d..000000000000 --- a/doc/release/upcoming_changes/27076.deprecation.rst +++ /dev/null @@ -1,3 +0,0 @@ -* Passing non-integer inputs as the first argument of `bincount` is now - deprecated, because such inputs are silently cast to integers with no - warning about loss of precision. diff --git a/doc/release/upcoming_changes/27091.change.rst b/doc/release/upcoming_changes/27091.change.rst deleted file mode 100644 index 5b71692efabd..000000000000 --- a/doc/release/upcoming_changes/27091.change.rst +++ /dev/null @@ -1,24 +0,0 @@ -Cast-safety fixes in ``copyto`` and ``full`` --------------------------------------------- -``copyto`` now uses NEP 50 correctly and applies this to its cast safety. -Python integer to NumPy integer casts and Python float to NumPy float casts -are now considered "safe" even if assignment may fail or precision may be lost. -This means the following examples change slightly: - -* ``np.copyto(int8_arr, 1000)`` previously performed an unsafe/same-kind cast - of the Python integer. It will now always raise, to achieve an unsafe cast - you must pass an array or NumPy scalar. -* ``np.copyto(uint8_arr, 1000, casting="safe")`` will raise an OverflowError - rather than a TypeError due to same-kind casting. -* ``np.copyto(float32_arr, 1e300, casting="safe")`` will overflow to ``inf`` - (float32 cannot hold ``1e300``) rather raising a TypeError. - -Further, only the dtype is used when assigning NumPy scalars (or 0-d arrays), -meaning that the following behaves differently: - -* ``np.copyto(float32_arr, np.float64(3.0), casting="safe")`` raises. -* ``np.coptyo(int8_arr, np.int64(100), casting="safe")`` raises. - Previously, NumPy checked whether the 100 fits the ``int8_arr``. - -This aligns ``copyto``, ``full``, and ``full_like`` with the correct NumPy 2 -behavior. 
\ No newline at end of file diff --git a/doc/source/building/blas_lapack.rst b/doc/source/building/blas_lapack.rst index 73ab4ac301aa..c00b3646d84e 100644 --- a/doc/source/building/blas_lapack.rst +++ b/doc/source/building/blas_lapack.rst @@ -96,7 +96,7 @@ Full list of BLAS and LAPACK related build options -------------------------------------------------- BLAS and LAPACK are complex dependencies. Some libraries have more options that -are exposed via build options (see ``meson_options.txt`` in the root of the +are exposed via build options (see ``meson.options`` in the root of the repo for all of NumPy's build options). - ``blas``: name of the BLAS library to use (default: ``auto``), diff --git a/doc/source/building/cross_compilation.rst b/doc/source/building/cross_compilation.rst index a162eb1d2f1a..82b896a8935c 100644 --- a/doc/source/building/cross_compilation.rst +++ b/doc/source/building/cross_compilation.rst @@ -24,7 +24,7 @@ may need to pass to Meson to successfully cross compile. One possible hiccup is that the build requires running a compiled executable in order to determine the ``long double`` format for the host platform. This may be -an obstable, since it requires ``crossenv`` or QEMU to run the host (cross) +an obstacle, since it requires ``crossenv`` or QEMU to run the host (cross) Python. To avoid this problem, specify the paths to the relevant directories in your *cross file*: diff --git a/doc/source/building/index.rst b/doc/source/building/index.rst index 54a58a7999d8..a4b061914a2b 100644 --- a/doc/source/building/index.rst +++ b/doc/source/building/index.rst @@ -161,7 +161,8 @@ your system. This is needed even if you use the MinGW-w64 or Intel compilers, in order to ensure you have the Windows Universal C Runtime (the other components of Visual Studio are not needed when using Mingw-w64, and can be deselected if - desired, to save disk space). + desired, to save disk space). The recommended version of the UCRT is + >= 10.0.22621.0. .. tab-set:: @@ -174,6 +175,12 @@ your system. run a ``.bat`` file for the correct bitness and architecture (e.g., for 64-bit Intel CPUs, use ``vcvars64.bat``). + If using a Conda environment while a version of Visual Studio 2019+ is + installed that includes the MSVC v142 package (VS 2019 C++ x86/x64 + build tools), activating the conda environment should cause Visual + Studio to be found and the appropriate .bat file executed to set + these variables. + For detailed guidance, see `Use the Microsoft C++ toolset from the command line `__. @@ -224,7 +231,7 @@ Otherwise, conda is recommended. .. note:: If you don't have a conda installation yet, we recommend using - Mambaforge_; any conda flavor will work though. + Miniforge_; any conda flavor will work though. Building from source to use NumPy ````````````````````````````````` @@ -256,6 +263,12 @@ Building from source to use NumPy git submodule update --init pip install . --no-build-isolation + .. warning:: + + On Windows, the AR, LD, and LDFLAGS environment variables may be set, + which will cause the pip install command to fail. These variables are only + needed for flang and can be safely unset prior to running pip install. + .. tab-item:: Virtual env or system Python :sync: pip @@ -363,6 +376,13 @@ like build the html documentation or running benchmarks. The ``spin`` interface is self-documenting, so please see ``spin --help`` and ``spin <command> --help`` for detailed guidance. +.. 
warning:: + + In an activated conda environment on Windows, the AR, LD, and LDFLAGS + environment variables may be set, which will cause the build to fail. + These variables are only needed for flang and can be safely unset + before building. + .. _meson-editable-installs: .. admonition:: IDE support & editable installs @@ -432,5 +452,5 @@ Background information distutils_equivalents -.. _Mambaforge: https://github.com/conda-forge/miniforge#mambaforge +.. _Miniforge: https://github.com/conda-forge/miniforge .. _meson-python: https://mesonbuild.com/meson-python/ diff --git a/doc/source/conf.py b/doc/source/conf.py index 2019529cb53b..3d093bdec433 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -575,7 +575,7 @@ def linkcode_resolve(domain, info): numpy.__version__, fn, linespec) from pygments.lexers import CLexer -from pygments.lexer import inherit, bygroups +from pygments.lexer import inherit from pygments.token import Comment class NumPyLexer(CLexer): diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst index d2846f48b833..dfa84a1f6331 100644 --- a/doc/source/dev/index.rst +++ b/doc/source/dev/index.rst @@ -19,6 +19,21 @@ we list them in alphabetical order): - Website design and development - Writing technical documentation +We understand that everyone has a different level of experience; +moreover, NumPy is a well-established project, so it's hard to +make assumptions about an ideal "first-time contributor". +That's why we don't mark issues with the "good-first-issue" +label. Instead, you'll find `issues labeled "Sprintable" `__. +These issues can either be: + +- **Easily fixed** when you have guidance from an experienced + contributor (perfect for working in a sprint). +- **A learning opportunity** for those ready to dive deeper, + even if you're not in a sprint. + +Additionally, depending on your prior experience, some "Sprintable" +issues might be easy, while others could be more challenging for you. + The rest of this document discusses working on the NumPy code base and documentation. We're in the process of updating our descriptions of other activities and roles. If you are interested in these other activities, please contact us! diff --git a/doc/source/f2py/f2py-testing.rst b/doc/source/f2py/f2py-testing.rst index c6680749c7c5..687b414975ee 100644 --- a/doc/source/f2py/f2py-testing.rst +++ b/doc/source/f2py/f2py-testing.rst @@ -45,7 +45,7 @@ class present in ``util.py``. This class has many helper functions for parsing and compiling test source files. Its child classes can override its ``sources`` data member to provide their own source files. -This superclass will then compile the added source files upon object creation andtheir +This superclass will then compile the added source files upon object creation and their functions will be appended to ``self.module`` data member. Thus, the child classes will be able to access the fortran functions specified in the source file by calling ``self.module.[fortran_function_name]``. diff --git a/doc/source/f2py/python-usage.rst b/doc/source/f2py/python-usage.rst index 54f74f02b6bf..8c68b6e03e2e 100644 --- a/doc/source/f2py/python-usage.rst +++ b/doc/source/f2py/python-usage.rst @@ -243,6 +243,13 @@ In Python: .. literalinclude:: ./code/results/extcallback_session.dat :language: python +.. note:: + + When using modified Fortran code via ``callstatement`` or other directives, + the wrapped Python function must be called as a callback, otherwise only the + bare Fortran routine will be used. 
For more details, see + https://github.com/numpy/numpy/issues/26681#issuecomment-2466460943 + Resolving arguments to call-back functions ------------------------------------------ diff --git a/doc/source/numpy_2_0_migration_guide.rst b/doc/source/numpy_2_0_migration_guide.rst index 2ff49b162fe4..55d4696a114d 100644 --- a/doc/source/numpy_2_0_migration_guide.rst +++ b/doc/source/numpy_2_0_migration_guide.rst @@ -220,6 +220,19 @@ using the NumPy types. You can still write cython code using the ``c.real`` and ``c.imag`` attributes (using the native typedefs), but you can no longer use in-place operators ``c.imag += 1`` in Cython's c++ mode. +Because NumPy 2 now includes ``complex.h``, code that uses a variable named +``I`` may see an error such as + +.. code-block:: c + error: expected ‘)’ before ‘__extension__’ + double I, + +Using the name ``I`` now requires an ``#undef I``. + +.. note:: + NumPy 2.0.1 briefly included the ``#undef I`` to help users not already + including ``complex.h``. + Changes to namespaces ===================== diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst index 3b2d0c4b2a02..593d5541877b 100644 --- a/doc/source/reference/arrays.classes.rst +++ b/doc/source/reference/arrays.classes.rst @@ -52,8 +52,6 @@ NumPy provides several hooks that classes can customize: .. py:method:: class.__array_ufunc__(ufunc, method, *inputs, **kwargs) - .. versionadded:: 1.13 - Any class, ndarray subclass or not, can define this method or set it to None in order to override the behavior of NumPy's ufuncs. This works quite similarly to Python's ``__mul__`` and other binary operation routines. @@ -156,8 +154,6 @@ NumPy provides several hooks that classes can customize: .. py:method:: class.__array_function__(func, types, args, kwargs) - .. versionadded:: 1.16 - - ``func`` is an arbitrary callable exposed by NumPy's public API, which was called in the form ``func(*args, **kwargs)``. - ``types`` is a collection :py:class:`collections.abc.Collection` @@ -292,7 +288,7 @@ NumPy provides several hooks that classes can customize: .. note:: It is hoped to eventually deprecate this method in favour of - func:`__array_ufunc__` for ufuncs (and :func:`__array_function__` + :func:`__array_ufunc__` for ufuncs (and :func:`__array_function__` for a few other functions like :func:`numpy.squeeze`). .. py:attribute:: class.__array_priority__ diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index a63fbdc6a910..2d10120c41f3 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -6,8 +6,6 @@ Datetimes and timedeltas ************************ -.. versionadded:: 1.7.0 - Starting in NumPy 1.7, there are core array data types which natively support datetime functionality. The data type is called :class:`datetime64`, so named because :class:`~datetime.datetime` is already taken by the Python standard library. diff --git a/doc/source/reference/arrays.ndarray.rst b/doc/source/reference/arrays.ndarray.rst index d03ebde361a2..5e0c43438f03 100644 --- a/doc/source/reference/arrays.ndarray.rst +++ b/doc/source/reference/arrays.ndarray.rst @@ -467,11 +467,11 @@ Truth value of an array (:class:`bool() `): Truth-value testing of an array invokes :meth:`ndarray.__bool__`, which raises an error if the number of - elements in the array is larger than 1, because the truth value
Use :meth:`.any() <ndarray.any>` and :meth:`.all() <ndarray.all>` instead to be clear about what is meant - in such cases. (If the number of elements is 0, the array evaluates - to ``False``.) + in such cases. (If you wish to check whether an array is empty, + use for example ``.size > 0``.) Unary operations: diff --git a/doc/source/reference/arrays.promotion.rst b/doc/source/reference/arrays.promotion.rst index cd476815f55c..f38f2d5eb9c5 100644 --- a/doc/source/reference/arrays.promotion.rst +++ b/doc/source/reference/arrays.promotion.rst @@ -149,7 +149,7 @@ Note the following specific rules and observations: 1. When a Python ``float`` or ``complex`` interacts with a NumPy integer the result will be ``float64`` or ``complex128`` (yellow border). - NumPy booleans will also be cast to the default integer.[#default-int] + NumPy booleans will also be cast to the default integer [#default-int]_. This is not relevant when additionally NumPy floating point values are involved. 2. The precision is drawn such that ``float16 < int16 < uint16`` because @@ -172,7 +172,7 @@ would give. Behavior of ``sum`` and ``prod`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -**``np.sum`` and ``np.prod``:** Will alway return the default integer type +``np.sum`` and ``np.prod`` will always return the default integer type when summing over integer values (or booleans). This is usually an ``int64``. The reason for this is that integer summations are otherwise very likely to overflow and give confusing results. @@ -214,7 +214,7 @@ The following rules apply: non-ascii characters. * For some purposes NumPy will promote almost any other datatype to strings. This applies to array creation or concatenation. -* The array constructers like ``np.array()`` will use ``object`` dtype when +* The array constructors like ``np.array()`` will use ``object`` dtype when there is no viable promotion. * Structured dtypes can promote when their field names and order match. In that case all fields are promoted individually. @@ -247,7 +247,7 @@ could drastically slow down evaluation. .. [#hist-reasons] To a large degree, this may just be for choices made early - on in NumPy's predecessors. For more details, see `NEP 50 `. + on in NumPy's predecessors. For more details, see :ref:`NEP 50 <NEP50>`. .. [#NEP50] See also :ref:`NEP 50 <NEP50>` which changed the rules for NumPy 2.0. Previous versions of NumPy would sometimes return higher diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 68fbb6ef3d66..aface4e9e56f 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -66,15 +66,11 @@ and its sub-types). .. c:function:: void PyArray_ENABLEFLAGS(PyArrayObject* arr, int flags) - .. versionadded:: 1.7 - Enables the specified array flags. This function does no validation, and assumes that you know what you're doing. .. c:function:: void PyArray_CLEARFLAGS(PyArrayObject* arr, int flags) - .. versionadded:: 1.7 - Clears the specified array flags. This function does no validation, and assumes that you know what you're doing. @@ -97,8 +93,6 @@ and its sub-types). .. c:function:: npy_intp *PyArray_SHAPE(PyArrayObject *arr) - .. versionadded:: 1.7 - A synonym for :c:func:`PyArray_DIMS`, named to be consistent with the `shape ` usage within Python. @@ -157,8 +151,6 @@ and its sub-types). .. c:function:: PyArray_Descr *PyArray_DTYPE(PyArrayObject* arr) - .. versionadded:: 1.7 - A synonym for PyArray_DESCR, named to be consistent with the 'dtype' usage within Python. 
@@ -275,8 +267,6 @@ From scratch PyArrayObject* prototype, NPY_ORDER order, PyArray_Descr* descr, \ int subok) - .. versionadded:: 1.6 - This function steals a reference to *descr* if it is not NULL. This array creation routine allows for the convenient creation of a new array matching an existing array's shapes and memory layout, @@ -406,8 +396,6 @@ From scratch .. c:function:: int PyArray_SetBaseObject(PyArrayObject* arr, PyObject* obj) - .. versionadded:: 1.7 - This function **steals a reference** to ``obj`` and sets it as the base property of ``arr``. @@ -688,7 +676,7 @@ From other objects Encapsulate the functionality of functions and methods that take the axis= keyword and work properly with None as the axis argument. The input array is ``obj``, while ``*axis`` is a - converted integer (so that >=MAXDIMS is the None value), and + converted integer (so that ``*axis == NPY_RAVEL_AXIS`` is the None value), and ``requirements`` gives the needed properties of ``obj``. The output is a converted version of the input so that requirements are met and if needed a flattening has occurred. On output @@ -934,8 +922,6 @@ argument must be a :c:expr:`PyObject *` that can be directly interpreted as a called on flexible dtypes. Types that are attached to an array will always be sized, hence the array form of this macro not existing. - .. versionchanged:: 1.18 - For structured datatypes with no fields this function now returns False. .. c:function:: int PyTypeNum_ISUSERDEF(int num) @@ -1065,8 +1051,6 @@ Converting data types .. c:function:: int PyArray_CanCastTypeTo( \ PyArray_Descr* fromtype, PyArray_Descr* totype, NPY_CASTING casting) - .. versionadded:: 1.6 - Returns non-zero if an array of data type *fromtype* (which can include flexible types) can be cast safely to an array of data type *totype* (which can include flexible types) according to @@ -1081,23 +1065,18 @@ Converting data types .. c:function:: int PyArray_CanCastArrayTo( \ PyArrayObject* arr, PyArray_Descr* totype, NPY_CASTING casting) - .. versionadded:: 1.6 - Returns non-zero if *arr* can be cast to *totype* according to the casting rule given in *casting*. If *arr* is an array scalar, its value is taken into account, and non-zero is also returned when the value will not overflow or be truncated to an integer when converting to a smaller type. - This is almost the same as the result of - PyArray_CanCastTypeTo(PyArray_MinScalarType(arr), totype, casting), - but it also handles a special case arising because the set - of uint values is not a subset of the int values for types with the - same number of bits. - .. c:function:: PyArray_Descr* PyArray_MinScalarType(PyArrayObject* arr) - .. versionadded:: 1.6 + .. note:: + With the adoption of NEP 50 in NumPy 2, this function is not used + internally. It is currently provided for backwards compatibility, + but expected to be eventually deprecated. If *arr* is an array, returns its data type descriptor, but if *arr* is an array scalar (has 0 dimensions), it finds the data type @@ -1111,8 +1090,6 @@ Converting data types .. c:function:: PyArray_Descr* PyArray_PromoteTypes( \ PyArray_Descr* type1, PyArray_Descr* type2) - .. versionadded:: 1.6 - Finds the data type of smallest size and kind to which *type1* and *type2* may be safely converted. This function is symmetric and associative. A string or unicode result will be the proper size for @@ -1122,8 +1099,6 @@ Converting data types npy_intp narrs, PyArrayObject **arrs, npy_intp ndtypes, \ PyArray_Descr **dtypes) - .. 
versionadded:: 1.6 - This applies type promotion to all the input arrays and dtype objects, using the NumPy rules for combining scalars and arrays, to determine the output type for an operation with the given set of @@ -1134,8 +1109,7 @@ Converting data types .. c:function:: int PyArray_ObjectType(PyObject* op, int mintype) - This function is superseded by :c:func:`PyArray_MinScalarType` and/or - :c:func:`PyArray_ResultType`. + This function is superseded by :c:func:`PyArray_ResultType`. This function is useful for determining a common type that two or more arrays can be converted to. It only works for non-flexible @@ -1163,11 +1137,6 @@ Converting data types ``DECREF`` 'd or a memory-leak will occur. The example template-code below shows a typical usage: - .. versionchanged:: 1.18.0 - A mix of scalars and zero-dimensional arrays now produces a type - capable of holding the scalar value. - Previously priority was given to the dtype of the arrays. - .. code-block:: c mps = PyArray_ConvertToCommonType(obj, &n); @@ -1264,6 +1233,13 @@ User-defined data types registered (checked only by the address of the pointer), then return the previously-assigned type-number. + The number of user DTypes known to NumPy is stored in + ``NPY_NUMUSERTYPES``, a static global variable that is public in the + C API. Accessing this symbol is inherently *not* thread-safe. If + for some reason you need to use this API in a multithreaded context, + you will need to add your own locking; NumPy does not ensure that new + data types can be added in a thread-safe manner. + .. c:function:: int PyArray_RegisterCastFunc( \ PyArray_Descr* descr, int totype, PyArray_VectorUnaryFunc* castfunc) @@ -2416,8 +2392,6 @@ Item selection and manipulation .. c:function:: npy_intp PyArray_CountNonzero(PyArrayObject* self) - .. versionadded:: 1.6 - Counts the number of non-zero elements in the array object *self*. .. c:function:: PyObject* PyArray_Nonzero(PyArrayObject* self) @@ -2677,8 +2651,6 @@ Array Functions .. c:function:: PyObject* PyArray_MatrixProduct2( \ PyObject* obj1, PyObject* obj, PyArrayObject* out) - .. versionadded:: 1.6 - Same as PyArray_MatrixProduct, but store the result in *out*. The output array must have the correct shape, type, and be C-contiguous, or an exception is raised. @@ -2688,8 +2660,6 @@ Array Functions PyArray_Descr* dtype, NPY_ORDER order, NPY_CASTING casting, \ PyArrayObject* out) - .. versionadded:: 1.6 - Applies the Einstein summation convention to the array operands provided, returning a new array or placing the result in *out*. The string in *subscripts* is a comma separated list of index @@ -2781,8 +2751,6 @@ Other functions Auxiliary data with object semantics ------------------------------------ -.. versionadded:: 1.7.0 - .. c:type:: NpyAuxData When working with more complex dtypes which are composed of other dtypes, @@ -2915,7 +2883,7 @@ of this useful approach to looping over an array from C. .. c:function:: void PyArray_ITER_NEXT(PyObject* iterator) - Incremement the index and the dataptr members of the *iterator* to + Increment the index and the dataptr members of the *iterator* to point to the next element of the array. If the array is not (C-style) contiguous, also increment the N-dimensional coordinates array. @@ -3064,8 +3032,6 @@ Broadcasting (multi-iterators) Neighborhood iterator --------------------- -.. versionadded:: 1.4.0 - Neighborhood iterators are subclasses of the iterator object, and can be used to iterate over a neighborhood of a point. 
For example, you may want to iterate over every voxel of a 3d image, and for every such voxel, iterate over an @@ -3243,30 +3209,18 @@ Array scalars .. c:function:: NPY_SCALARKIND PyArray_ScalarKind( \ int typenum, PyArrayObject** arr) - See the function :c:func:`PyArray_MinScalarType` for an alternative - mechanism introduced in NumPy 1.6.0. + Legacy way to query special promotion for scalar values. This is not + used in NumPy itself anymore and is expected to be deprecated eventually. - Return the kind of scalar represented by *typenum* and the array - in *\*arr* (if *arr* is not ``NULL`` ). The array is assumed to be - rank-0 and only used if *typenum* represents a signed integer. If - *arr* is not ``NULL`` and the first element is negative then - :c:data:`NPY_INTNEG_SCALAR` is returned, otherwise - :c:data:`NPY_INTPOS_SCALAR` is returned. The possible return values - are the enumerated values in :c:type:`NPY_SCALARKIND`. + New DTypes can define promotion rules specific to Python scalars. .. c:function:: int PyArray_CanCoerceScalar( \ char thistype, char neededtype, NPY_SCALARKIND scalar) - See the function :c:func:`PyArray_ResultType` for details of - NumPy type promotion, updated in NumPy 1.6.0. + Legacy way to query special promotion for scalar values. This is not + used in NumPy itself anymore and is expected to be deprecated eventually. - Implements the rules for scalar coercion. Scalars are only - silently coerced from thistype to neededtype if this function - returns nonzero. If scalar is :c:data:`NPY_NOSCALAR`, then this - function is equivalent to :c:func:`PyArray_CanCastSafely`. The rule is - that scalars of the same KIND can be coerced into arrays of the - same KIND. This rule means that high-precision scalars will never - cause low-precision arrays of the same KIND to be upcast. + Use ``PyArray_ResultType`` for similar purposes. Data-type descriptors @@ -4076,8 +4030,6 @@ extension with the lowest :c:data:`NPY_FEATURE_VERSION` as possible. .. c:function:: unsigned int PyArray_GetNDArrayCFeatureVersion(void) - .. versionadded:: 1.4.0 - This just returns the value :c:data:`NPY_FEATURE_VERSION`. :c:data:`NPY_FEATURE_VERSION` changes whenever the API changes (e.g. a function is added). A changed value does not always require a recompile. @@ -4092,8 +4044,8 @@ Memory management .. c:function:: char* PyDataMem_RENEW(void * ptr, size_t newbytes) - Macros to allocate, free, and reallocate memory. These macros are used - internally to create arrays. + Functions to allocate, free, and reallocate memory. These are used + internally to manage array data memory unless overridden. .. c:function:: npy_intp* PyDimMem_NEW(int nd) @@ -4474,8 +4426,6 @@ Enumerated Types .. c:enum:: NPY_CASTING - .. versionadded:: 1.6 - An enumeration type indicating how permissive data conversions should be. This is used by the iterator added in NumPy 1.6, and is intended to be used more broadly in a future version. diff --git a/doc/source/reference/c-api/config.rst b/doc/source/reference/c-api/config.rst index 097eba9b7089..939beeefd666 100644 --- a/doc/source/reference/c-api/config.rst +++ b/doc/source/reference/c-api/config.rst @@ -78,8 +78,6 @@ Platform information .. c:macro:: NPY_CPU_S390 .. c:macro:: NPY_CPU_PARISC - .. versionadded:: 1.3.0 - CPU architecture of the platform; only one of the above is defined. @@ -91,8 +89,6 @@ Platform information .. c:macro:: NPY_BYTE_ORDER - .. versionadded:: 1.3.0 - Portable alternatives to the ``endian.h`` macros of GNU Libc. 
If big endian, :c:data:`NPY_BYTE_ORDER` == :c:data:`NPY_BIG_ENDIAN`, and similarly for little endian architectures. @@ -101,8 +97,6 @@ Platform information .. c:function:: int PyArray_GetEndianness() - .. versionadded:: 1.3.0 - Returns the endianness of the current platform. One of :c:data:`NPY_CPU_BIG`, :c:data:`NPY_CPU_LITTLE`, or :c:data:`NPY_CPU_UNKNOWN_ENDIAN`. diff --git a/doc/source/reference/c-api/coremath.rst b/doc/source/reference/c-api/coremath.rst index f8e0efb34d24..c07abb47bc10 100644 --- a/doc/source/reference/c-api/coremath.rst +++ b/doc/source/reference/c-api/coremath.rst @@ -185,8 +185,6 @@ Those can be useful for precise floating point comparison. * NPY_FPE_UNDERFLOW * NPY_FPE_INVALID - .. versionadded:: 1.15.0 - .. c:function:: int npy_clear_floatstatus() Clears the floating point status. Returns the previous status mask. @@ -201,8 +199,6 @@ Those can be useful for precise floating point comparison. prevent aggressive compiler optimizations from reordering this function call. Returns the previous status mask. - .. versionadded:: 1.15.0 - .. _complex-numbers: Support for complex numbers diff --git a/doc/source/reference/c-api/datetimes.rst b/doc/source/reference/c-api/datetimes.rst index 5e344c7c1b74..34fc81ed1351 100644 --- a/doc/source/reference/c-api/datetimes.rst +++ b/doc/source/reference/c-api/datetimes.rst @@ -194,7 +194,7 @@ Conversion functions Returns the string length to use for converting datetime objects with the given local time and unit settings to strings. - Use this when constructings strings to supply to + Use this when constructing strings to supply to ``NpyDatetime_MakeISO8601Datetime``. .. c:function:: int NpyDatetime_MakeISO8601Datetime(\ diff --git a/doc/source/reference/c-api/dtype.rst b/doc/source/reference/c-api/dtype.rst index ce23c51aa9ea..43869d5b4c55 100644 --- a/doc/source/reference/c-api/dtype.rst +++ b/doc/source/reference/c-api/dtype.rst @@ -1,3 +1,5 @@ + + Data type API ============= diff --git a/doc/source/reference/c-api/iterator.rst b/doc/source/reference/c-api/iterator.rst index 50fbec96392a..817bcad7e4a2 100644 --- a/doc/source/reference/c-api/iterator.rst +++ b/doc/source/reference/c-api/iterator.rst @@ -7,8 +7,6 @@ Array iterator API pair: iterator; C-API pair: C-API; iterator -.. versionadded:: 1.6 - Array iterator -------------- @@ -639,8 +637,6 @@ Construction and destruction .. c:macro:: NPY_ITER_ARRAYMASK - .. versionadded:: 1.7 - Indicates that this operand is the mask to use for selecting elements when writing to operands which have the :c:data:`NPY_ITER_WRITEMASKED` flag applied to them. @@ -663,8 +659,6 @@ Construction and destruction .. c:macro:: NPY_ITER_WRITEMASKED - .. versionadded:: 1.7 - This array is the mask for all `writemasked ` operands. Code uses the ``writemasked`` flag which indicates that only elements where the chosen ARRAYMASK operand is True @@ -1127,8 +1121,6 @@ Construction and destruction .. c:function:: npy_bool NpyIter_IsFirstVisit(NpyIter* iter, int iop) - .. versionadded:: 1.7 - Checks to see whether this is the first time the elements of the specified reduction operand which the iterator points at are being seen for the first time. 
The function returns a reasonable answer diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index 8d57153d8803..4565e602193f 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ b/doc/source/reference/c-api/types-and-structures.rst @@ -1611,3 +1611,29 @@ for completeness and assistance in understanding the code. ``arrayobject.h`` header. This type is not exposed to Python and could be replaced with a C-structure. As a Python type it takes advantage of reference- counted memory management. + + +NumPy C-API and C complex +========================= +When you use the NumPy C-API, you will have access to the complex number declarations +``npy_cdouble`` and ``npy_cfloat``, which are declared in terms of the C +standard types from ``complex.h``. Unfortunately, ``complex.h`` contains +``#define I ...`` (where the actual definition depends on the compiler), which +means that any downstream user that does ``#include `` +could get ``I`` defined, and then a declaration such as ``double I;`` in +their code will result in an obscure compiler error like + +.. code-block:: c + error: expected ‘)’ before ‘__extension__’ + double I, + +This error can be avoided by adding:: + + #undef I + +to your code. + +.. versionchanged:: 2.0 + The inclusion of ``complex.h`` was new in NumPy 2, so code defining + a different ``I`` may not have required the ``#undef I`` on older versions. + NumPy 2.0.1 briefly included the ``#undef I`` to help users not already + including ``complex.h``. \ No newline at end of file diff --git a/doc/source/reference/global_state.rst b/doc/source/reference/global_state.rst index e0ab1bb2a7ba..e66c86faf1b3 100644 --- a/doc/source/reference/global_state.rst +++ b/doc/source/reference/global_state.rst @@ -1,14 +1,13 @@ .. _global_state: -************ -Global state -************ - -NumPy has a few import-time, compile-time, or runtime options -which change the global behaviour. -Most of these are related to performance or for debugging -purposes and will not be interesting to the vast majority -of users. +**************************** +Global Configuration Options +**************************** + +NumPy has a few import-time, compile-time, or runtime configuration +options which change the global behaviour. Most of these are related to +performance or for debugging purposes and will not be interesting to the +vast majority of users. Performance-related options diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst index ed9641409014..02e3248953fb 100644 --- a/doc/source/reference/index.rst +++ b/doc/source/reference/index.rst @@ -58,6 +58,7 @@ Other topics array_api simd/index + thread_safety global_state security distutils_status_migration diff --git a/doc/source/reference/random/c-api.rst b/doc/source/reference/random/c-api.rst index 2819c769cb44..ba719b799866 100644 --- a/doc/source/reference/random/c-api.rst +++ b/doc/source/reference/random/c-api.rst @@ -3,8 +3,6 @@ C API for random .. currentmodule:: numpy.random -.. versionadded:: 1.19.0 - Access to various distributions below is available via Cython or C-wrapper libraries like CFFI. All the functions accept a :c:type:`bitgen_t` as their first argument. To access these from Cython or C, you must link with the diff --git a/doc/source/reference/random/index.rst b/doc/source/reference/random/index.rst index 976a03a9a449..77d39d0e771f 100644 --- a/doc/source/reference/random/index.rst +++ b/doc/source/reference/random/index.rst @@ -65,7 +65,6 @@ arbitrary 128-bit integer. 
>>> import numpy as np >>> import secrets - >>> import numpy as np >>> secrets.randbits(128) #doctest: +SKIP 122807528840384100672342137672332424406 # may vary >>> rng1 = np.random.default_rng(122807528840384100672342137672332424406) diff --git a/doc/source/reference/random/performance.py b/doc/source/reference/random/performance.py index 794142836652..39a8ba7bc118 100644 --- a/doc/source/reference/random/performance.py +++ b/doc/source/reference/random/performance.py @@ -59,7 +59,7 @@ table = table.T table = table.reindex(columns) table = table.T -table = table.reindex([k for k in funcs], axis=0) +table = table.reindex(list(funcs), axis=0) print(table.to_csv(float_format='%0.1f')) diff --git a/doc/source/reference/routines.array-manipulation.rst b/doc/source/reference/routines.array-manipulation.rst index 619458de8224..5a2b30b8b0d9 100644 --- a/doc/source/reference/routines.array-manipulation.rst +++ b/doc/source/reference/routines.array-manipulation.rst @@ -18,7 +18,6 @@ Changing array shape .. autosummary:: :toctree: generated/ - reshape ravel ndarray.flat @@ -119,6 +118,5 @@ Rearranging elements flip fliplr flipud - reshape roll rot90 diff --git a/doc/source/reference/routines.emath.rst b/doc/source/reference/routines.emath.rst index 1ee835c0f8ee..7751c922b677 100644 --- a/doc/source/reference/routines.emath.rst +++ b/doc/source/reference/routines.emath.rst @@ -3,9 +3,37 @@ Mathematical functions with automatic domain ******************************************** -.. currentmodule:: numpy +.. currentmodule:: numpy.emath -.. note:: :mod:`numpy.emath` is a preferred alias for ``numpy.lib.scimath``, +.. note:: ``numpy.emath`` is a preferred alias for ``numpy.lib.scimath``, available after :mod:`numpy` is imported. -.. automodule:: numpy.emath +Wrapper functions to more user-friendly calling of certain math functions +whose output data-type is different than the input data-type in certain +domains of the input. + +For example, for functions like `log` with branch cuts, the versions in this +module provide the mathematically valid answers in the complex plane:: + + >>> import math + >>> np.emath.log(-math.exp(1)) == (1+1j*math.pi) + True + +Similarly, `sqrt`, other base logarithms, `power` and trig functions +are correctly handled. See their respective docstrings for specific examples. + +Functions +--------- + +.. autosummary:: + :toctree: generated/ + + arccos + arcsin + arctanh + log + log2 + logn + log10 + power + sqrt diff --git a/doc/source/reference/routines.linalg.rst b/doc/source/reference/routines.linalg.rst index ae9eb629d919..d4fd7f9e0677 100644 --- a/doc/source/reference/routines.linalg.rst +++ b/doc/source/reference/routines.linalg.rst @@ -62,6 +62,8 @@ Matrix and vector products outer matmul linalg.matmul (Array API compatible location) + matvec + vecmat tensordot linalg.tensordot (Array API compatible location) einsum @@ -139,8 +141,6 @@ Exceptions Linear algebra on several matrices at once ------------------------------------------ -.. versionadded:: 1.8.0 - Several of the linear algebra routines listed above are able to compute results for several matrices at once, if they are stacked into the same array. diff --git a/doc/source/reference/routines.polynomials.chebyshev.rst b/doc/source/reference/routines.polynomials.chebyshev.rst index 087b7beb9f06..3256bd52b9cd 100644 --- a/doc/source/reference/routines.polynomials.chebyshev.rst +++ b/doc/source/reference/routines.polynomials.chebyshev.rst @@ -1,5 +1,3 @@ -.. versionadded:: 1.4.0 - .. 
automodule:: numpy.polynomial.chebyshev :no-members: :no-inherited-members: diff --git a/doc/source/reference/routines.polynomials.hermite.rst b/doc/source/reference/routines.polynomials.hermite.rst index c881d9aaf1ea..30c81fb04628 100644 --- a/doc/source/reference/routines.polynomials.hermite.rst +++ b/doc/source/reference/routines.polynomials.hermite.rst @@ -1,5 +1,3 @@ -.. versionadded:: 1.6.0 - .. automodule:: numpy.polynomial.hermite :no-members: :no-inherited-members: diff --git a/doc/source/reference/routines.polynomials.hermite_e.rst b/doc/source/reference/routines.polynomials.hermite_e.rst index bfcb900c8782..edfbee25ffc4 100644 --- a/doc/source/reference/routines.polynomials.hermite_e.rst +++ b/doc/source/reference/routines.polynomials.hermite_e.rst @@ -1,5 +1,3 @@ -.. versionadded:: 1.6.0 - .. automodule:: numpy.polynomial.hermite_e :no-members: :no-inherited-members: diff --git a/doc/source/reference/routines.polynomials.laguerre.rst b/doc/source/reference/routines.polynomials.laguerre.rst index 68c44630077c..35cd84ff9b0b 100644 --- a/doc/source/reference/routines.polynomials.laguerre.rst +++ b/doc/source/reference/routines.polynomials.laguerre.rst @@ -1,5 +1,3 @@ -.. versionadded:: 1.6.0 - .. automodule:: numpy.polynomial.laguerre :no-members: :no-inherited-members: diff --git a/doc/source/reference/routines.polynomials.legendre.rst b/doc/source/reference/routines.polynomials.legendre.rst index e10065b4d5fe..0bf91647ab4e 100644 --- a/doc/source/reference/routines.polynomials.legendre.rst +++ b/doc/source/reference/routines.polynomials.legendre.rst @@ -1,5 +1,3 @@ -.. versionadded:: 1.6.0 - .. automodule:: numpy.polynomial.legendre :no-members: :no-inherited-members: diff --git a/doc/source/reference/routines.polynomials.polynomial.rst b/doc/source/reference/routines.polynomials.polynomial.rst index 71000a60db2c..5784b80a2787 100644 --- a/doc/source/reference/routines.polynomials.polynomial.rst +++ b/doc/source/reference/routines.polynomials.polynomial.rst @@ -1,5 +1,3 @@ -.. versionadded:: 1.4.0 - .. 
automodule:: numpy.polynomial.polynomial :no-members: :no-inherited-members: diff --git a/doc/source/reference/simd/build-options.rst b/doc/source/reference/simd/build-options.rst index b4daf09a5b42..8dba69f7c744 100644 --- a/doc/source/reference/simd/build-options.rst +++ b/doc/source/reference/simd/build-options.rst @@ -203,7 +203,7 @@ Behaviors # is equivalent to python -m build --wheel -Csetup-args=-Dcpu-baseline="sse sse2 sse3 ssse3 sse41 popcnt sse42" -- ``cpu-dispatch`` does not combain any of implied CPU features, +- ``cpu-dispatch`` does not combine any of implied CPU features, so you must add them unless you want to disable one or all of them:: # Only dispatches AVX2 and FMA3 diff --git a/doc/source/reference/simd/gen_features.py b/doc/source/reference/simd/gen_features.py index b141e23d0dd7..5f022a91da38 100644 --- a/doc/source/reference/simd/gen_features.py +++ b/doc/source/reference/simd/gen_features.py @@ -1,7 +1,7 @@ """ Generate CPU features tables from CCompilerOpt """ -from os import sys, path +from os import path from numpy.distutils.ccompiler_opt import CCompilerOpt class FakeCCompilerOpt(CCompilerOpt): diff --git a/doc/source/reference/simd/how-it-works.rst b/doc/source/reference/simd/how-it-works.rst index 3704efa66147..67fe519ca17d 100644 --- a/doc/source/reference/simd/how-it-works.rst +++ b/doc/source/reference/simd/how-it-works.rst @@ -201,7 +201,7 @@ through ``--cpu-dispatch``, but it can also represent other options such as: #define NPY__CPU_TARGET_AVX2 #define NPY__CPU_TARGET_AVX512F // our dispatch-able source - #include "/the/absuolate/path/of/hello.dispatch.c" + #include "/the/absolute/path/of/hello.dispatch.c" - **(D) Dispatch-able configuration header**: The infrastructure generates a config header for each dispatch-able source, this header @@ -234,7 +234,7 @@ through ``--cpu-dispatch``, but it can also represent other options such as: // the additional optimizations, so it could be SSE42 or AVX512F #define CURRENT_TARGET(X) NPY_CAT(NPY_CAT(X, _), NPY__CPU_TARGET_CURRENT) #endif - // Macro 'CURRENT_TARGET' adding the current target as suffux to the exported symbols, + // Macro 'CURRENT_TARGET' adding the current target as suffix to the exported symbols, // to avoid linking duplications, NumPy already has a macro called // 'NPY_CPU_DISPATCH_CURFX' similar to it, located at // numpy/numpy/_core/src/common/npy_cpu_dispatch.h diff --git a/doc/source/reference/thread_safety.rst b/doc/source/reference/thread_safety.rst new file mode 100644 index 000000000000..84590bfac39c --- /dev/null +++ b/doc/source/reference/thread_safety.rst @@ -0,0 +1,51 @@ +.. _thread_safety: + +************* +Thread Safety +************* + +NumPy supports use in a multithreaded context via the `threading` module in the +standard library. Many NumPy operations release the GIL, so unlike many +situations in Python, it is possible to improve parallel performance by +exploiting multithreaded parallelism in Python. + +The easiest performance gains happen when each worker thread owns its own array +or set of array objects, with no data directly shared between threads. Because +NumPy releases the GIL for many low-level operations, threads that spend most of +the time in low-level code will run in parallel. + +It is possible to share NumPy arrays between threads, but extreme care must be +taken to avoid creating thread safety issues when mutating arrays that are +shared between multiple threads. 
If two threads simultaneously read from and +write to the same array, they will at best produce inconsistent, racy results that +are not reproducible, let alone correct. It is also possible to crash the Python +interpreter by, for example, resizing an array while another thread is reading +from it to compute a ufunc operation. + +In the future, we may add locking to ndarray to make writing multithreaded +algorithms using NumPy arrays safer, but for now we suggest focusing on +read-only access of arrays that are shared between threads, or adding your own +locking if you need both mutation and multithreading. + +Note that operations that *do not* release the GIL will see no performance gains +from use of the `threading` module, and instead might be better served with +`multiprocessing`. In particular, operations on arrays with ``dtype=object`` do +not release the GIL. + +Free-threaded Python +-------------------- + +.. versionadded:: 2.1 + +Starting with NumPy 2.1 and CPython 3.13, NumPy also has experimental support +for Python runtimes with the GIL disabled. See +https://py-free-threading.github.io for more information about installing and +using free-threaded Python, as well as information about supporting it in +libraries that depend on NumPy. + +Because free-threaded Python does not have a global interpreter lock to +serialize access to Python objects, there are more opportunities for threads to +mutate shared state and create thread safety issues. In addition to the +limitations about locking of the ndarray object noted above, this also means +that arrays with ``dtype=object`` are not protected by the GIL, creating data +races for Python objects that are not possible outside free-threaded Python. diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst index 6df29817b0d8..f1fed6f5624a 100644 --- a/doc/source/reference/ufuncs.rst +++ b/doc/source/reference/ufuncs.rst @@ -40,14 +40,10 @@ advanced usage and will not typically be used. .. rubric:: *out* -.. versionadded:: 1.6 - The first output can be provided as either a positional or a keyword parameter. Keyword 'out' arguments are incompatible with positional ones. -.. versionadded:: 1.10 - The 'out' keyword argument is expected to be a tuple with one entry per output (which can be None for arrays to be allocated by the ufunc). For ufuncs with a single output, passing a single array (instead of a @@ -64,8 +60,6 @@ default), then this corresponds to the entire output being filled. Note that outputs not explicitly filled are left with their uninitialized values. -.. versionadded:: 1.13 - Operations where ufunc input and output operands have memory overlap are defined to be the same as for equivalent operations where there is no memory overlap. Operations affected make temporary copies
versionadded:: 1.15 - A single axis over which a generalized ufunc should operate. This is a short-cut for ufuncs that operate over a single, shared core dimension, equivalent to passing in ``axes`` with entries of ``(axis,)`` for each @@ -116,8 +104,6 @@ for a signature ``(i),(i)->()``, it is equivalent to passing in .. rubric:: *keepdims* -.. versionadded:: 1.15 - If this is set to `True`, axes which are reduced over will be left in the result as a dimension with size one, so that the result will broadcast correctly against the inputs. This option can only be used for generalized @@ -128,8 +114,6 @@ the dimensions in the output can be controlled with ``axes`` and ``axis``. .. rubric:: *casting* -.. versionadded:: 1.6 - May be 'no', 'equiv', 'safe', 'same_kind', or 'unsafe'. See :func:`can_cast` for explanations of the parameter values. @@ -142,8 +126,6 @@ onwards, the default is 'same_kind'. .. rubric:: *order* -.. versionadded:: 1.6 - Specifies the calculation iteration order/memory layout of the output array. Defaults to 'K'. 'C' means the output should be C-contiguous, 'F' means F-contiguous, 'A' means F-contiguous if the inputs are F-contiguous and @@ -152,8 +134,6 @@ the element ordering of the inputs as closely as possible. .. rubric:: *dtype* -.. versionadded:: 1.6 - Overrides the DType of the output arrays the same way as the *signature*. This should ensure a matching precision of the calculation. The exact calculation DTypes chosen may depend on the ufunc and the inputs may be @@ -161,8 +141,6 @@ cast to this DType to perform the calculation. .. rubric:: *subok* -.. versionadded:: 1.6 - Defaults to true. If set to false, the output will always be a strict array, not a subtype. diff --git a/doc/source/release.rst b/doc/source/release.rst index cad71725fe94..a22178a055ee 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,7 +5,15 @@ Release notes .. toctree:: :maxdepth: 2 + 2.2.3 + 2.2.2 + 2.2.1 + 2.2.0 + 2.1.3 + 2.1.2 + 2.1.1 2.1.0 + 2.0.2 2.0.1 2.0.0 1.26.4 diff --git a/doc/source/release/1.10.0-notes.rst b/doc/source/release/1.10.0-notes.rst index 88062e4632e9..4a2c4cc5e836 100644 --- a/doc/source/release/1.10.0-notes.rst +++ b/doc/source/release/1.10.0-notes.rst @@ -187,7 +187,7 @@ New Features Reading extra flags from site.cfg --------------------------------- Previously customization of compilation of dependency libraries and numpy -itself was only accomblishable via code changes in the distutils package. +itself was only accomplishable via code changes in the distutils package. Now numpy.distutils reads in the following extra flags from each group of the *site.cfg*: diff --git a/doc/source/release/2.0.0-notes.rst b/doc/source/release/2.0.0-notes.rst index 9d54513edb7c..a0763048a59f 100644 --- a/doc/source/release/2.0.0-notes.rst +++ b/doc/source/release/2.0.0-notes.rst @@ -4,14 +4,6 @@ NumPy 2.0.0 Release Notes ========================= -.. note:: - - The release of 2.0 is in progress and the current release overview and - highlights are still in a draft state. However, the highlights should - already list the most significant changes detailed in the full notes below, - and those full notes should be complete (if not copy-edited well enough - yet). - NumPy 2.0.0 is the first major release since 2006. It is the result of 11 months of development since the last feature release and is the work of 212 contributors spread over 1078 pull requests. 
It contains a large number of diff --git a/doc/source/release/2.0.2-notes.rst b/doc/source/release/2.0.2-notes.rst new file mode 100644 index 000000000000..ae5c26250ba7 --- /dev/null +++ b/doc/source/release/2.0.2-notes.rst @@ -0,0 +1,58 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.0.2 Release Notes +========================== + +NumPy 2.0.2 is a maintenance release that fixes bugs and regressions +discovered after the 2.0.1 release. + +The Python versions supported by this release are 3.9-3.12. + + +Contributors +============ + +A total of 13 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Bruno Oliveira + +* Charles Harris +* Chris Sidebottom +* Christian Heimes + +* Christopher Sidebottom +* Mateusz Sokół +* Matti Picus +* Nathan Goldbaum +* Pieter Eendebak +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg +* Yair Chuchem + + + +Pull requests merged +==================== + +A total of 19 pull requests were merged for this release. + +* `#27000 `__: REL: Prepare for the NumPy 2.0.1 release [wheel build] +* `#27001 `__: MAINT: prepare 2.0.x for further development +* `#27021 `__: BUG: cfuncs.py: fix crash when sys.stderr is not available +* `#27022 `__: DOC: Fix migration note for ``alltrue`` and ``sometrue`` +* `#27061 `__: BUG: use proper input and output descriptor in array_assign_subscript... +* `#27073 `__: BUG: Mirror VQSORT_ENABLED logic in Quicksort +* `#27074 `__: BUG: Bump Highway to latest master +* `#27077 `__: BUG: Off by one in memory overlap check +* `#27122 `__: BUG: Use the new ``npyv_loadable_stride_`` functions for ldexp and... +* `#27126 `__: BUG: Bump Highway to latest +* `#27128 `__: BUG: add missing error handling in public_dtype_api.c +* `#27129 `__: BUG: fix another cast setup in array_assign_subscript +* `#27130 `__: BUG: Fix building NumPy in FIPS mode +* `#27131 `__: BLD: update vendored Meson for cross-compilation patches +* `#27146 `__: MAINT: Scipy openblas 0.3.27.44.4 +* `#27151 `__: BUG: Do not accidentally store dtype metadata in ``np.save`` +* `#27195 `__: REV: Revert undef I and document it +* `#27213 `__: BUG: Fix NPY_RAVEL_AXIS on backwards compatible NumPy 2 builds +* `#27279 `__: BUG: Fix array_equal for numeric and non-numeric scalar types + diff --git a/doc/source/release/2.1.0-notes.rst b/doc/source/release/2.1.0-notes.rst index d0b0b6f1b785..bb9c71079062 100644 --- a/doc/source/release/2.1.0-notes.rst +++ b/doc/source/release/2.1.0-notes.rst @@ -1,19 +1,362 @@ .. currentmodule:: numpy -========================== +========================= NumPy 2.1.0 Release Notes -========================== +========================= +NumPy 2.1.0 provides support for the upcoming Python 3.13 release and drops +support for Python 3.9. In addition to the usual bug fixes and updated Python +support, it helps get us back into our usual release cycle after the extended +development of 2.0. The highlights for this release are: -Highlights -========== +- Support for the array-api 2023.12 standard. +- Support for Python 3.13. +- Preliminary support for free threaded Python 3.13. -*We'll choose highlights for this release near the end of the release cycle.* +Python versions 3.10-3.13 are supported in this release. -.. if release snippets have been incorporated already, uncomment the follow - line (leave the `.. include:: directive) +New functions +============= -.. 
**Content from release note snippets in doc/release/upcoming_changes:** +New function ``numpy.unstack`` +------------------------------ + +A new function ``np.unstack(array, axis=...)`` was added, which splits +an array into a tuple of arrays along an axis. It serves as the inverse +of `numpy.stack`. + +(`gh-26579 `__) + + +Deprecations +============ + +* The ``fix_imports`` keyword argument in ``numpy.save`` is deprecated. Since + NumPy 1.17, ``numpy.save`` uses a pickle protocol that no longer supports + Python 2, and has ignored the ``fix_imports`` keyword. This keyword is kept only + for backward compatibility. It is now deprecated. + + (`gh-26452 `__) + +* Passing non-integer inputs as the first argument of `bincount` is now + deprecated, because such inputs are silently cast to integers with no + warning about loss of precision. + + (`gh-27076 `__) + + +Expired deprecations +==================== + +* Scalars and 0D arrays are disallowed for ``numpy.nonzero`` and ``numpy.ndarray.nonzero``. + + (`gh-26268 `__) + +* The ``set_string_function`` internal function was removed and ``PyArray_SetStringFunction`` + was stubbed out. + + (`gh-26611 `__) + + +C API changes +============= + +API symbols now hidden but customizable +--------------------------------------- +NumPy now defaults to hiding the API symbols it adds to allow all NumPy API +usage. This means that by default you cannot dynamically fetch the NumPy API +from another library (this was never possible on Windows). + +If you are experiencing linking errors related to ``PyArray_API`` or +``PyArray_RUNTIME_VERSION``, you can define the +``NPY_API_SYMBOL_ATTRIBUTE`` to opt out of this change. + +If you are experiencing problems due to an upstream header including NumPy, +the solution is to make sure you ``#include "numpy/ndarrayobject.h"`` before +their header and import NumPy yourself based on ``including-the-c-api``. + +(`gh-26103 `__) + +Many shims removed from npy_3kcompat.h +-------------------------------------- +Many of the old shims and helper functions were removed from +``npy_3kcompat.h``. If you find yourself in need of these, vendor the previous +version of the file into your codebase. + +(`gh-26842 `__) + +New ``PyUFuncObject`` field ``process_core_dims_func`` +------------------------------------------------------ +The field ``process_core_dims_func`` was added to the structure +``PyUFuncObject``. For generalized ufuncs, this field can be set to a function +of type ``PyUFunc_ProcessCoreDimsFunc`` that will be called when the ufunc is +called. It allows the ufunc author to check that core dimensions satisfy +additional constraints, and to set output core dimension sizes if they have not +been provided. + +(`gh-26908 `__) + + +New Features +============ + +Preliminary Support for Free-Threaded CPython 3.13 +-------------------------------------------------- + +CPython 3.13 will be available as an experimental free-threaded build. See +https://py-free-threading.github.io, `PEP 703 +`_ and the `CPython 3.13 release notes +`_ for +more detail about free-threaded Python. + +NumPy 2.1 has preliminary support for the free-threaded build of CPython +3.13. This support was enabled by fixing a number of C thread-safety issues in +NumPy. Before NumPy 2.1, NumPy used a large number of C global static variables +to store runtime caches and other state. We have either refactored to avoid the +need for global state, converted the global state to thread-local state, or +added locking.
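+
+As a purely illustrative sketch (not something NumPy itself provides), code
+can check at runtime whether it is running on a free-threaded interpreter
+using only the standard library; ``Py_GIL_DISABLED`` and
+``sys._is_gil_enabled()`` are CPython 3.13 facilities:
+
+.. code-block:: python
+
+    import sys
+    import sysconfig
+
+    # Py_GIL_DISABLED is 1 for free-threaded (GIL-disabled) CPython builds.
+    if sysconfig.get_config_var("Py_GIL_DISABLED"):
+        # Even on a free-threaded build the GIL can be re-enabled at
+        # runtime, e.g. when an incompatible extension module is imported.
+        print("free-threaded build; GIL currently enabled:",
+              sys._is_gil_enabled())
+    else:
+        print("standard GIL-enabled build")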
+ +Support for free-threaded Python does not mean that NumPy is thread +safe. Read-only shared access to ndarray should be safe. NumPy exposes shared +mutable state and we have not added any locking to the array object itself to +serialize access to shared state. Care must be taken in user code to avoid +races if you would like to mutate the same array in multiple threads. It is +certainly possible to crash NumPy by mutating an array in +multiple threads, for example by calling a ufunc and the ``resize`` method +simultaneously. For now our guidance is: "don't do that". In the future we would +like to provide stronger guarantees. + +Object arrays in particular need special care, since the GIL +previously provided locking for object array access and no longer does. See +`Issue #27199 `_ for more +information about object arrays in the free-threaded build. + +If you are interested in free-threaded Python, for example because you have a +multiprocessing-based workflow that you are interested in running with Python +threads, we encourage testing and experimentation. + +If you run into problems that you suspect are because of NumPy, please `open an +issue `_, checking first if +the bug also occurs in the "regular" non-free-threaded CPython 3.13 build. Many +threading bugs can also occur in code that releases the GIL; disabling the GIL +only makes it easier to hit threading bugs. + +(`gh-26157 `__) + +* ``numpy.reshape`` and ``numpy.ndarray.reshape`` now support ``shape`` and + ``copy`` arguments. + + (`gh-26292 `__) + +* NumPy now supports DLPack v1; support for older versions will + be deprecated in the future. + + (`gh-26501 `__) + +* ``numpy.asanyarray`` now supports ``copy`` and ``device`` arguments, matching + ``numpy.asarray``. + + (`gh-26580 `__) + +* ``numpy.printoptions``, ``numpy.get_printoptions``, and + ``numpy.set_printoptions`` now support a new option, ``override_repr``, for + defining custom ``repr(array)`` behavior. + + (`gh-26611 `__) + +* ``numpy.cumulative_sum`` and ``numpy.cumulative_prod`` were added as Array + API compatible alternatives for ``numpy.cumsum`` and ``numpy.cumprod``. The + new functions can include a fixed initial (zeros for ``sum`` and ones for + ``prod``) in the result. + + (`gh-26724 `__) + +* ``numpy.clip`` now supports ``max`` and ``min`` keyword arguments which are + meant to replace ``a_min`` and ``a_max``. Also, for ``np.clip(a)`` or + ``np.clip(a, None, None)`` a copy of the input array will be returned instead + of raising an error. + + (`gh-26724 `__) + +* ``numpy.astype`` now supports the ``device`` argument. + + (`gh-26724 `__) + +``f2py`` can generate freethreading-compatible C extensions +----------------------------------------------------------- +Pass ``--freethreading-compatible`` to the f2py CLI tool to produce a C +extension marked as compatible with the free-threaded CPython +interpreter. Doing so prevents the interpreter from re-enabling the GIL at +runtime when it imports the C extension. Note that ``f2py`` does not analyze +Fortran code for thread safety, so you must verify that the wrapped Fortran +code is thread safe before marking the extension as compatible. + +(`gh-26981 `__) + + +Improvements +============ + +``histogram`` auto-binning now returns bin sizes >=1 for integer input data +--------------------------------------------------------------------------- +For integer input data, bin sizes smaller than 1 result in spurious empty +bins.
This is now avoided when the number of bins is computed using one of the +algorithms provided by ``histogram_bin_edges``. + +(`gh-12150 `__) + +``ndarray`` shape-type parameter is now covariant and bound to ``tuple[int, ...]`` +---------------------------------------------------------------------------------- +Static typing for ``ndarray`` is a long-term effort that continues +with this change. It is a generic type with type parameters for +the shape and the data type. Previously, the shape type parameter could be +any value. This change restricts it to a tuple of ints, as one would expect +from using ``ndarray.shape``. Further, the shape-type parameter has been +changed from invariant to covariant. This change also applies to the subtypes +of ``ndarray``, e.g. ``numpy.ma.MaskedArray``. See the +`typing docs `_ +for more information. + +(`gh-26081 `__) + +``np.quantile`` with method ``closest_observation`` chooses nearest even order statistic +---------------------------------------------------------------------------------------- +This changes the definition of nearest for border cases from the nearest odd +order statistic to nearest even order statistic. The numpy implementation now +matches other reference implementations. + +(`gh-26656 `__) + +``lapack_lite`` is now thread safe +---------------------------------- +NumPy provides a minimal low-performance version of LAPACK named ``lapack_lite`` +that can be used if no BLAS/LAPACK system is detected at build time. + +Until now, ``lapack_lite`` was not thread safe. Single-threaded use cases did +not hit any issues, but running linear algebra operations in multiple threads +could lead to errors, incorrect results, or segfaults due to data races. + +We have added a global lock, serializing access to ``lapack_lite`` in multiple +threads. + +(`gh-26750 `__) + +The ``numpy.printoptions`` context manager is now thread and async-safe +----------------------------------------------------------------------- +In prior versions of NumPy, the printoptions were defined using a combination +of Python and C global variables. We have refactored so the state is stored in +a python ``ContextVar``, making the context manager thread and async-safe. + +(`gh-26846 `__) + +Type hinting ``numpy.polynomial`` +--------------------------------- +Starting from the 2.1 release, PEP 484 type annotations have been included for +the functions and convenience classes in ``numpy.polynomial`` and its +sub-packages. + +(`gh-26897 `__) + +Improved ``numpy.dtypes`` type hints +------------------------------------ +The type annotations for ``numpy.dtypes`` are now a better reflection of the +runtime: The ``numpy.dtype`` type-aliases have been replaced with specialized +``dtype`` *subtypes*, and the previously missing annotations for +``numpy.dtypes.StringDType`` have been added. + +(`gh-27008 `__) + + +Performance improvements and changes +==================================== + +* ``numpy.save`` now uses pickle protocol version 4 for saving arrays with + object dtype, which allows for pickle objects larger than 4GB and improves + saving speed by about 5% for large arrays. + + (`gh-26388 `__) + +* OpenBLAS on x86_64 and i686 is built with fewer kernels. Based on + benchmarking, there are 5 clusters of performance around these kernels: + ``PRESCOTT NEHALEM SANDYBRIDGE HASWELL SKYLAKEX``. 
+ + (`gh-27147 `__) + +* OpenBLAS on Windows is linked without quadmath, simplifying licensing. + + (`gh-27147 `__) + +* Due to a regression in OpenBLAS on Windows, the performance improvements when + using multiple threads for OpenBLAS 0.3.26 were reverted. + + (`gh-27147 `__) + +``ma.cov`` and ``ma.corrcoef`` are now significantly faster +----------------------------------------------------------- +The private function has been refactored along with ``ma.cov`` and +``ma.corrcoef``. They are now significantly faster, particularly on large, +masked arrays. + +(`gh-26285 `__) + + +Changes +======= + +* As ``numpy.vecdot`` is now a ufunc it has a less precise signature. + This is due to the limitations of ufunc's typing stub. + + (`gh-26313 `__) + +* ``numpy.floor``, ``numpy.ceil``, and ``numpy.trunc`` now won't perform + casting to a floating dtype for integer and boolean dtype input arrays. + + (`gh-26766 `__) + +``ma.corrcoef`` may return a slightly different result +------------------------------------------------------ +A pairwise observation approach is currently used in ``ma.corrcoef`` to +calculate the standard deviations for each pair of variables. This has been +changed as it is being used to normalise the covariance, estimated using +``ma.cov``, which does not consider the observations for each variable in a +pairwise manner, rendering it unnecessary. The normalisation has been replaced +by the more appropriate standard deviation for each variable, which +significantly reduces the wall time, but will return slightly different +estimates of the correlation coefficients in cases where the observations +between a pair of variables are not aligned. However, it will return the same +estimates in all other cases, including returning the same correlation matrix +as ``corrcoef`` when using a masked array with no masked values. + +(`gh-26285 `__) + +Cast-safety fixes in ``copyto`` and ``full`` +-------------------------------------------- +``copyto`` now uses NEP 50 correctly and applies this to its cast safety. +Python integer to NumPy integer casts and Python float to NumPy float casts +are now considered "safe" even if assignment may fail or precision may be lost. +This means the following examples change slightly: + +* ``np.copyto(int8_arr, 1000)`` previously performed an unsafe/same-kind cast + of the Python integer. It will now always raise; to achieve an unsafe cast + you must pass an array or NumPy scalar. + +* ``np.copyto(uint8_arr, 1000, casting="safe")`` will raise an OverflowError + rather than a TypeError due to same-kind casting. + +* ``np.copyto(float32_arr, 1e300, casting="safe")`` will overflow to ``inf`` + (float32 cannot hold ``1e300``) rather than raising a TypeError. + +Further, only the dtype is used when assigning NumPy scalars (or 0-d arrays), +meaning that the following behaves differently: + +* ``np.copyto(float32_arr, np.float64(3.0), casting="safe")`` raises. + +* ``np.copyto(int8_arr, np.int64(100), casting="safe")`` raises. + Previously, NumPy checked whether the 100 fits in the ``int8_arr``. + +This aligns ``copyto``, ``full``, and ``full_like`` with the correct NumPy 2 +behavior. + +(`gh-27091 `__) -.. include:: notes-towncrier.rst diff --git a/doc/source/release/2.1.1-notes.rst b/doc/source/release/2.1.1-notes.rst new file mode 100644 index 000000000000..79c63514695c --- /dev/null +++ b/doc/source/release/2.1.1-notes.rst @@ -0,0 +1,41 @@ +..
currentmodule:: numpy + +========================== +NumPy 2.1.1 Release Notes +========================== + +NumPy 2.1.1 is a maintenance release that fixes bugs and regressions +discovered after the 2.1.0 release. + +The Python versions supported by this release are 3.10-3.13. + +Contributors +============ + +A total of 7 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Andrew Nelson +* Charles Harris +* Mateusz Sokół +* Maximilian Weigand + +* Nathan Goldbaum +* Pieter Eendebak +* Sebastian Berg + +Pull requests merged +==================== + +A total of 10 pull requests were merged for this release. + +* `#27236 `__: REL: Prepare for the NumPy 2.1.0 release [wheel build] +* `#27252 `__: MAINT: prepare 2.1.x for further development +* `#27259 `__: BUG: revert unintended change in the return value of set_printoptions +* `#27266 `__: BUG: fix reference counting bug in __array_interface__ implementation… +* `#27267 `__: TST: Add regression test for missing descr in array-interface +* `#27276 `__: BUG: Fix #27256 and #27257 +* `#27278 `__: BUG: Fix array_equal for numeric and non-numeric scalar types +* `#27287 `__: MAINT: Update maintenance/2.1.x after the 2.0.2 release +* `#27303 `__: BLD: cp311- macosx_arm64 wheels [wheel build] +* `#27304 `__: BUG: f2py: better handle filtering of public/private subroutines + diff --git a/doc/source/release/2.1.2-notes.rst b/doc/source/release/2.1.2-notes.rst new file mode 100644 index 000000000000..1a187dbd3365 --- /dev/null +++ b/doc/source/release/2.1.2-notes.rst @@ -0,0 +1,48 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.1.2 Release Notes +========================== + +NumPy 2.1.2 is a maintenance release that fixes bugs and regressions +discovered after the 2.1.1 release. + +The Python versions supported by this release are 3.10-3.13. + +Contributors +============ + +A total of 11 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Chris Sidebottom +* Ishan Koradia + +* João Eiras + +* Katie Rust + +* Marten van Kerkwijk +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Pieter Eendebak +* Slava Gorloff + + +Pull requests merged +==================== + +A total of 14 pull requests were merged for this release. + +* `#27333 `__: MAINT: prepare 2.1.x for further development +* `#27400 `__: BUG: apply critical sections around populating the dispatch cache +* `#27406 `__: BUG: Stub out get_build_msvc_version if distutils.msvccompiler... +* `#27416 `__: BUILD: fix missing include for std::ptrdiff_t for C++23 language... +* `#27433 `__: BLD: pin setuptools to avoid breaking numpy.distutils +* `#27437 `__: BUG: Allow unsigned shift argument for np.roll +* `#27439 `__: BUG: Disable SVE VQSort +* `#27471 `__: BUG: rfftn axis bug +* `#27479 `__: BUG: Fix extra decref of PyArray_UInt8DType. +* `#27480 `__: CI: use PyPI not scientific-python-nightly-wheels for CI doc... +* `#27481 `__: MAINT: Check for SVE support on demand +* `#27484 `__: BUG: initialize the promotion state to be weak +* `#27501 `__: MAINT: Bump pypa/cibuildwheel from 2.20.0 to 2.21.2 +* `#27506 `__: BUG: avoid segfault on bad arguments in ndarray.__array_function__ diff --git a/doc/source/release/2.1.3-notes.rst b/doc/source/release/2.1.3-notes.rst new file mode 100644 index 000000000000..cd797e0062a0 --- /dev/null +++ b/doc/source/release/2.1.3-notes.rst @@ -0,0 +1,81 @@ +.. 
currentmodule:: numpy + +========================== +NumPy 2.1.3 Release Notes +========================== + +NumPy 2.1.3 is a maintenance release that fixes bugs and regressions +discovered after the 2.1.2 release. + +The Python versions supported by this release are 3.10-3.13. + + +Improvements +============ + +* Fixed a number of issues around promotion for string ufuncs with StringDType + arguments. Mixing StringDType and the fixed-width DTypes using the string + ufuncs should now generate much more uniform results. + + (`gh-27636 `__) + + +Changes +======= + +* `numpy.fix` now won't perform casting to a floating data-type for integer + and boolean data-type input arrays. + + (`gh-26766 `__) + + +Contributors +============ + +A total of 15 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Abhishek Kumar + +* Austin + +* Benjamin A. Beasley + +* Charles Harris +* Christian Lorentzen +* Marcel Telka + +* Matti Picus +* Michael Davidsaver + +* Nathan Goldbaum +* Peter Hawkins +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg +* dependabot[bot] +* kp2pml30 + + + +Pull requests merged +==================== + +A total of 21 pull requests were merged for this release. + +* `#27512 `__: MAINT: prepare 2.1.x for further development +* `#27537 `__: MAINT: Bump actions/cache from 4.0.2 to 4.1.1 +* `#27538 `__: MAINT: Bump pypa/cibuildwheel from 2.21.2 to 2.21.3 +* `#27539 `__: MAINT: MSVC does not support #warning directive +* `#27543 `__: BUG: Fix user dtype can-cast with python scalar during promotion +* `#27561 `__: DEV: bump ``python`` to 3.12 in environment.yml +* `#27562 `__: BLD: update vendored Meson to 1.5.2 +* `#27563 `__: BUG: weighted quantile for some zero weights (#27549) +* `#27565 `__: MAINT: Use miniforge for macos conda test. +* `#27566 `__: BUILD: satisfy gcc-13 pendantic errors +* `#27569 `__: BUG: handle possible error for PyTraceMallocTrack +* `#27570 `__: BLD: start building Windows free-threaded wheels [wheel build] +* `#27571 `__: BUILD: vendor tempita from Cython +* `#27574 `__: BUG: Fix warning "differs in levels of indirection" in npy_atomic.h... +* `#27592 `__: MAINT: Update Highway to latest +* `#27593 `__: BUG: Adjust numpy.i for SWIG 4.3 compatibility +* `#27616 `__: BUG: Fix Linux QEMU CI workflow +* `#27668 `__: BLD: Do not set __STDC_VERSION__ to zero during build +* `#27669 `__: ENH: fix wasm32 runtime type error in numpy._core +* `#27672 `__: BUG: Fix a reference count leak in npy_find_descr_for_scalar. +* `#27673 `__: BUG: fixes for StringDType/unicode promoters + diff --git a/doc/source/release/2.2.0-notes.rst b/doc/source/release/2.2.0-notes.rst new file mode 100644 index 000000000000..41b3d2b58004 --- /dev/null +++ b/doc/source/release/2.2.0-notes.rst @@ -0,0 +1,210 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.2.0 Release Notes +========================== + +The NumPy 2.2.0 release is a quick release that brings us back into sync with the +usual twice yearly release cycle. There have been a number of small cleanups, +as well as work bringing the new StringDType to completion and improving support +for free-threaded Python. Highlights are: + +* New functions ``matvec`` and ``vecmat``, see below. +* Many improved annotations. +* Improved support for the new StringDType. +* Improved support for free-threaded Python. +* Fixes for f2py. + +This release supports Python versions 3.10-3.13. + + +Deprecations +============ + +* ``_add_newdoc_ufunc`` is now deprecated.
``ufunc.__doc__ = newdoc`` should + be used instead. + + (`gh-27735 `__) + + +Expired deprecations +==================== + +* ``bool(np.array([]))`` and other empty arrays will now raise an error. + Use ``arr.size > 0`` instead to check whether an array has no elements. + + (`gh-27160 `__) + + +Compatibility notes +=================== + +* `numpy.cov` now properly transposes single-row (2d array) design matrices + when ``rowvar=False``. Previously, single-row design matrices would return a + scalar in this scenario, which is not correct, so this is a behavior change + and an array of the appropriate shape will now be returned. + + (`gh-27661 `__) + + +New Features +============ + +* New functions for matrix-vector and vector-matrix products + + Two new generalized ufuncs were defined: + + * `numpy.matvec` - matrix-vector product, treating the arguments as + stacks of matrices and column vectors, respectively. + + * `numpy.vecmat` - vector-matrix product, treating the arguments as + stacks of column vectors and matrices, respectively. For complex + vectors, the conjugate is taken. + + These add to the existing `numpy.matmul` as well as to `numpy.vecdot`, + which was added in NumPy 2.0. + + Note that `numpy.matmul` never takes a complex conjugate, not even + when its left input is a vector, while both `numpy.vecdot` and + `numpy.vecmat` do take the conjugate for complex vectors on the + left-hand side (which are taken to be the ones that are transposed, + following the physics convention). + + (`gh-25675 `__) + +* ``np.complexfloating[T, T]`` can now also be written as + ``np.complexfloating[T]``. + + (`gh-27420 `__) + +* UFuncs now support a ``__dict__`` attribute and allow overriding ``__doc__`` + (either directly or via ``ufunc.__dict__["__doc__"]``). ``__dict__`` can + also be used to override other properties, such as ``__module__`` or + ``__qualname__``. + + (`gh-27735 `__) + +* The "nbit" type parameter of ``np.number`` and its subtypes now defaults + to ``typing.Any``. This way, type-checkers will infer annotations such as + ``x: np.floating`` as ``x: np.floating[Any]``, even in strict mode. + + (`gh-27736 `__) + + +Improvements +============ + +* The ``datetime64`` and ``timedelta64`` hashes now correctly match the Python + builtin ``datetime`` and ``timedelta`` ones. The hashes of equal values now + compare equal even when the time units differ. + + (`gh-14622 `__) + +* Fixed a number of issues around promotion for string ufuncs with StringDType + arguments. Mixing StringDType and the fixed-width DTypes using the string + ufuncs should now generate much more uniform results. + + (`gh-27636 `__) + +* Improved support for empty `memmap`. Previously an empty `memmap` would fail + unless a non-zero ``offset`` was set. Now a zero-size `memmap` is supported + even if ``offset=0``. To achieve this, if a `memmap` is mapped to an empty + file, that file is padded with a single byte. + + (`gh-27723 `__) + +``f2py`` handles multiple modules and exposes variables again +------------------------------------------------------------- +A regression has been fixed which allows F2PY users to expose variables to +Python in modules with only assignments, and also fixes situations where +multiple modules are present within a single source file. + +(`gh-27695 `__) + + +Performance improvements and changes +==================================== + +* Improved multithreaded scaling on the free-threaded build when many threads + simultaneously call the same ufunc operations.
+ + (`gh-27896 `__) + +* NumPy now uses fast-on-failure attribute lookups for protocols. This can + greatly reduce overheads of function calls or array creation especially with + custom Python objects. The largest improvements will be seen on Python 3.12 + or newer. + + (`gh-27119 `__) + +* OpenBLAS on x86_64 and i686 is built with fewer kernels. Based on + benchmarking, there are 5 clusters of performance around these kernels: + ``PRESCOTT NEHALEM SANDYBRIDGE HASWELL SKYLAKEX``. + +* OpenBLAS on Windows is linked without quadmath, simplifying licensing. + +* Due to a regression in OpenBLAS on Windows, the performance improvements + when using multiple threads for OpenBLAS 0.3.26 were reverted. + + (`gh-27147 `__) + +* NumPy now indicates hugepages also for large ``np.zeros`` allocations + on Linux. This should generally improve performance. + + (`gh-27808 `__) + + +Changes +======= + +* `numpy.fix` now won't perform casting to a floating data-type for integer + and boolean data-type input arrays. + + (`gh-26766 `__) + +* The type annotations of ``numpy.float64`` and ``numpy.complex128`` now + reflect that they are also subtypes of the built-in ``float`` and ``complex`` + types, respectively. This update prevents static type-checkers from reporting + errors in cases such as: + + .. code-block:: python + + x: float = numpy.float64(6.28) # valid + z: complex = numpy.complex128(-1j) # valid + + (`gh-27334 `__) + +* The ``repr`` of arrays large enough to be summarized (i.e., where elements + are replaced with ``...``) now includes the ``shape`` of the array, similar + to what was already the case for arrays with zero size and non-obvious + shape. With this change, the shape is always given when it cannot be + inferred from the values. Note that while written as ``shape=...``, this + argument cannot actually be passed in to the ``np.array`` constructor. If + you encounter problems, e.g., due to failing doctests, you can use the print + option ``legacy=2.1`` to get the old behaviour. + + (`gh-27482 `__) + +* Calling ``__array_wrap__`` directly on NumPy arrays or scalars now does the + right thing when ``return_scalar`` is passed (Added in NumPy 2). It is + further safe now to call the scalar ``__array_wrap__`` on a non-scalar + result. + + (`gh-27807 `__) + +Bump the musllinux CI image and wheels to 1_2 from 1_1. This is because 1_1 is +`end of life `_. + +(`gh-27088 `__) + +NEP 50 promotion state option removed +------------------------------------- +The NEP 50 promotion state settings are now removed. They were always meant as +a temporary means for testing. A warning will be given if the environment +variable is set to anything but ``NPY_PROMOTION_STATE=weak``, and +``_set_promotion_state`` and ``_get_promotion_state`` have been removed. Code +that used ``_no_nep50_warning`` can substitute a ``contextlib.nullcontext`` +when it is not available. + +(`gh-27156 `__) + diff --git a/doc/source/release/2.2.1-notes.rst b/doc/source/release/2.2.1-notes.rst new file mode 100644 index 000000000000..fe60fa0268f3 --- /dev/null +++ b/doc/source/release/2.2.1-notes.rst @@ -0,0 +1,54 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.2.1 Release Notes +========================== + +NumPy 2.2.1 is a patch release following 2.2.0. It fixes bugs found after the +2.2.0 release and has several maintenance pins to work around upstream changes. + +There was some breakage in downstream projects following the 2.2.0 release due +to updates to NumPy typing.
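+For a quick local check of how the new stubs behave, one can run a type
+checker over a small file exercising NumPy annotations; the file name and
+annotations below are illustrative only, not part of these notes:
+
+.. code-block:: python
+
+    # check_numpy_typing.py -- hypothetical example file
+    import numpy as np
+
+    x: np.float64 = np.float64(6.28)
+    y: float = x  # np.float64 is also a subtype of the builtin float
+    arr: np.ndarray[tuple[int, ...], np.dtype[np.float64]] = np.zeros(3)
+
+Running a type checker over such a file makes stub regressions easy to spot.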
Because of problems due to MyPy defects, we +recommend using basedpyright for type checking; it can be installed from +PyPI. The Pylance extension for Visual Studio Code is also based on Pyright. +Problems that persist when using basedpyright should be reported as issues +on the NumPy GitHub site. + +This release supports Python 3.10-3.13. + + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Joren Hammudoglu +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Simon Altrogge +* Thomas A Caswell +* Warren Weckesser +* Yang Wang + + + +Pull requests merged +==================== + +A total of 12 pull requests were merged for this release. + +* `#27935 `__: MAINT: Prepare 2.2.x for further development +* `#27950 `__: TEST: cleanups [skip cirrus][skip azp] +* `#27958 `__: BUG: fix use-after-free error in npy_hashtable.cpp (#27955) +* `#27959 `__: BLD: add missing include +* `#27982 `__: BUG:fix compile error libatomic link test to meson.build +* `#27990 `__: TYP: Fix falsely rejected value types in ``ndarray.__setitem__`` +* `#27991 `__: MAINT: Don't wrap ``#include `` with ``extern "C"`` +* `#27993 `__: BUG: Fix segfault in stringdtype lexsort +* `#28006 `__: MAINT: random: Tweak module code in mtrand.pyx to fix a Cython... +* `#28007 `__: BUG: Cython API was missing NPY_UINTP. +* `#28021 `__: CI: pin scipy-doctest to 1.5.1 +* `#28044 `__: TYP: allow ``None`` in operand sequence of nditer + diff --git a/doc/source/release/2.2.2-notes.rst b/doc/source/release/2.2.2-notes.rst new file mode 100644 index 000000000000..8a3de547ec81 --- /dev/null +++ b/doc/source/release/2.2.2-notes.rst @@ -0,0 +1,49 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.2.2 Release Notes +========================== + +NumPy 2.2.2 is a patch release that fixes bugs found after the 2.2.1 release. +The number of typing fixes/updates is notable. This release supports Python +versions 3.10-3.13. + + +Contributors +============ + +A total of 8 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Alicia Boya García + +* Charles Harris +* Joren Hammudoglu +* Kai Germaschewski + +* Nathan Goldbaum +* PTUsumit + +* Rohit Goswami +* Sebastian Berg + + +Pull requests merged +==================== + +A total of 16 pull requests were merged for this release. + +* `#28050 `__: MAINT: Prepare 2.2.x for further development +* `#28055 `__: TYP: fix ``void`` arrays not accepting ``str`` keys in ``__setitem__`` +* `#28066 `__: TYP: fix unnecessarily broad ``integer`` binop return types (#28065) +* `#28112 `__: TYP: Better ``ndarray`` binop return types for ``float64`` &...
+* `#28113 `__: TYP: Return the correct ``bool`` from ``issubdtype`` +* `#28114 `__: TYP: Always accept ``date[time]`` in the ``datetime64`` constructor +* `#28120 `__: BUG: Fix auxdata initialization in ufunc slow path +* `#28131 `__: BUG: move reduction initialization to ufunc initialization +* `#28132 `__: TYP: Fix ``interp`` to accept and return scalars +* `#28137 `__: BUG: call PyType_Ready in f2py to avoid data races +* `#28145 `__: BUG: remove unnecessary call to PyArray_UpdateFlags +* `#28160 `__: BUG: Avoid data race in PyArray_CheckFromAny_int +* `#28175 `__: BUG: Fix f2py directives and --lower casing +* `#28176 `__: TYP: Fix overlapping overloads issue in 2->1 ufuncs +* `#28177 `__: TYP: preserve shape-type in ndarray.astype() +* `#28178 `__: TYP: Fix missing and spurious top-level exports + diff --git a/doc/source/release/2.2.3-notes.rst b/doc/source/release/2.2.3-notes.rst new file mode 100644 index 000000000000..cf21d751ec00 --- /dev/null +++ b/doc/source/release/2.2.3-notes.rst @@ -0,0 +1,56 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.2.3 Release Notes +========================== + +NumPy 2.2.3 is a patch release that fixes bugs found after the 2.2.2 release. +The majority of the changes are typing improvements and fixes for free +threaded Python. Both of those areas are still under development, so if you +discover new problems, please report them. + +This release supports Python versions 3.10-3.13. + + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !amotzop +* Charles Harris +* Chris Sidebottom +* Joren Hammudoglu +* Matthew Brett +* Nathan Goldbaum +* Raghuveer Devulapalli +* Sebastian Berg +* Yakov Danishevsky + + +Pull requests merged +==================== + +A total of 21 pull requests were merged for this release. + +* `#28185 `__: MAINT: Prepare 2.2.x for further development +* `#28201 `__: BUG: fix data race in a more minimal way on stable branch +* `#28208 `__: BUG: Fix ``from_float_positional`` errors for huge pads +* `#28209 `__: BUG: fix data race in np.repeat +* `#28212 `__: MAINT: Use VQSORT_COMPILER_COMPATIBLE to determine if we should... +* `#28224 `__: MAINT: update highway to latest +* `#28236 `__: BUG: Add cpp atomic support (#28234) +* `#28237 `__: BLD: Compile fix for clang-cl on WoA +* `#28243 `__: TYP: Avoid upcasting ``float64`` in the set-ops +* `#28249 `__: BLD: better fix for clang / ARM compiles +* `#28266 `__: TYP: Fix ``timedelta64.__divmod__`` and ``timedelta64.__mod__``... +* `#28274 `__: TYP: Fixed missing typing information of set_printoptions +* `#28278 `__: BUG: backport resource cleanup bugfix from gh-28273 +* `#28282 `__: BUG: fix incorrect bytes to stringdtype coercion +* `#28283 `__: TYP: Fix scalar constructors +* `#28284 `__: TYP: stub ``numpy.matlib`` +* `#28285 `__: TYP: stub the missing ``numpy.testing`` modules +* `#28286 `__: CI: Fix the github label for ``TYP:`` PR's and issues +* `#28305 `__: TYP: Backport typing updates from main +* `#28321 `__: BUG: fix race initializing legacy dtype casts +* `#28324 `__: CI: update test_moderately_small_alpha diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst index 61468132879f..950b9c36b373 100644 --- a/doc/source/user/absolute_beginners.rst +++ b/doc/source/user/absolute_beginners.rst @@ -430,7 +430,7 @@ With ``np.reshape``, you can specify a few optional parameters:: ``a`` is the array to be reshaped. 
-``newshape`` is the new shape you want. You can specify an integer or a tuple of +``shape`` is the new shape you want. You can specify an integer or a tuple of integers. If you specify an integer, the result will be an array of that length. The shape should be compatible with the original shape. @@ -664,7 +664,10 @@ where you want to slice your array. :: array([4, 5, 6, 7, 8]) Here, you grabbed a section of your array from index position 3 through index -position 8. +position 8 but not including position 8 itself. + +*Reminder: Array indexes begin at 0. This means the first element of the array is at index 0, +the second element is at index 1, and so on.* You can also stack two existing arrays, both vertically and horizontally. Let's say you have two arrays, ``a1`` and ``a2``:: diff --git a/doc/source/user/basics.creation.rst b/doc/source/user/basics.creation.rst index 6c09adfdff54..1a7707ee69c9 100644 --- a/doc/source/user/basics.creation.rst +++ b/doc/source/user/basics.creation.rst @@ -87,7 +87,7 @@ you create the array. =========================================== .. - 40 functions seems like a small number, but the routies.array-creation + 40 functions seems like a small number, but the routines.array-creation has ~47. I'm sure there are more. NumPy has over 40 built-in functions for creating arrays as laid diff --git a/doc/source/user/basics.io.genfromtxt.rst b/doc/source/user/basics.io.genfromtxt.rst index 64dd46153091..d5b6bba8f28d 100644 --- a/doc/source/user/basics.io.genfromtxt.rst +++ b/doc/source/user/basics.io.genfromtxt.rst @@ -131,10 +131,6 @@ marker(s) is simply ignored:: [7., 8.], [9., 0.]]) -.. versionadded:: 1.7.0 - - When ``comments`` is set to ``None``, no lines are treated as comments. - .. note:: There is one notable exception to this behavior: if the optional argument diff --git a/doc/source/user/basics.rec.rst b/doc/source/user/basics.rec.rst index 8402ee7f8e17..af14bcd10201 100644 --- a/doc/source/user/basics.rec.rst +++ b/doc/source/user/basics.rec.rst @@ -535,7 +535,7 @@ Similarly to tuples, structured scalars can also be indexed with an integer:: >>> scalar = np.array([(1, 2., 3.)], dtype='i, f, f')[0] >>> scalar[0] - 1 + np.int32(1) >>> scalar[1] = 4 Thus, tuples might be thought of as the native Python equivalent to numpy's @@ -595,7 +595,7 @@ removed:: >>> dt = np.dtype("i1,V3,i4,V1")[["f0", "f2"]] >>> dt - dtype({'names':['f0','f2'], 'formats':['i1','<i4'], 'offsets':[0,4], 'itemsize':9}) + dtype({'names': ['f0', 'f2'], 'formats': ['i1', '<i4'], 'offsets': [0, 4], 'itemsize': 9}) >>> np.result_type(dt) dtype([('f0', 'i1'), ('f2', '<i4')]) >>> dt = np.dtype("i1,V3,i4,V1", align=True)[["f0", "f2"]] >>> dt - dtype({'names':['f0','f2'], 'formats':['i1','<i4'], 'offsets':[0,4], 'itemsize':12}, align=True) + dtype({'names': ['f0', 'f2'], 'formats': ['i1', '<i4'], 'offsets': [0, 4], 'itemsize': 12}, align=True) >>> np.result_type(dt) dtype([('f0', 'i1'), ('f2', '<i4')], align=True) >>> np.result_type(dt).isalignedstruct diff --git a/doc/source/user/basics.subclassing.rst b/doc/source/user/basics.subclassing.rst index e0baba938f16..7b1e8fd34512 100644 --- a/doc/source/user/basics.subclassing.rst +++ b/doc/source/user/basics.subclassing.rst @@ -42,7 +42,7 @@ This can result in surprising behavior if you use NumPy methods or functions you have not explicitly tested. On the other hand, compared to other interoperability approaches, -subclassing can be a useful because many thing will "just work". +subclassing can be useful because many things will "just work". This means that subclassing can be a convenient approach and for a long time it was also often the only available approach. @@ -227,7 +227,7 @@ like:: obj = ndarray.__new__(subtype, shape, ... -where ``subdtype`` is the subclass. Thus the returned view is of the +where ``subtype`` is the subclass.
Thus the returned view is of the same class as the subclass, rather than being of class ``ndarray``. That solves the problem of returning views of the same type, but now @@ -461,8 +461,6 @@ So: ``__array_ufunc__`` for ufuncs ============================== -.. versionadded:: 1.13 - A subclass can override what happens when executing numpy ufuncs on it by overriding the default ``ndarray.__array_ufunc__`` method. This method is executed *instead* of the ufunc and should return either the result of the diff --git a/doc/source/user/basics.types.rst b/doc/source/user/basics.types.rst index 0b665574cbdc..a605d32fcd51 100644 --- a/doc/source/user/basics.types.rst +++ b/doc/source/user/basics.types.rst @@ -314,7 +314,7 @@ but gives -1486618624 (incorrect) for a 32-bit integer. >>> np.power(100, 9, dtype=np.int64) 1000000000000000000 >>> np.power(100, 9, dtype=np.int32) - -1486618624 + np.int32(-1486618624) The behaviour of NumPy and Python integer types differs significantly for integer overflows and may confuse users expecting NumPy integers to behave @@ -342,6 +342,30 @@ range of possible values. >>> np.power(100, 100, dtype=np.float64) 1e+200 +Floating point precision +======================== + +Many functions in NumPy, especially those in `numpy.linalg`, involve floating-point +arithmetic, which can introduce small inaccuracies due to the way computers +represent decimal numbers. For instance, when performing basic arithmetic operations +involving floating-point numbers: + + >>> 0.3 - 0.2 - 0.1 # This does not equal 0 due to floating-point precision + -2.7755575615628914e-17 + +To handle such cases, it's advisable to use functions like `np.isclose` to compare +values, rather than checking for exact equality: + + >>> np.isclose(0.3 - 0.2 - 0.1, 0, rtol=1e-05) # Check for closeness to 0 + True + +In this example, `np.isclose` accounts for the minor inaccuracies that occur in +floating-point calculations by applying a relative tolerance, ensuring that results +within a small threshold are considered close. + +For information about precision in calculations, see `Floating-Point Arithmetic `_. + + Extended precision ================== diff --git a/doc/source/user/byteswapping.rst b/doc/source/user/byteswapping.rst index 01247500347f..8f08d2a01a3d 100644 --- a/doc/source/user/byteswapping.rst +++ b/doc/source/user/byteswapping.rst @@ -40,9 +40,9 @@ there are two integers, and that they are 16 bit and big-endian: >>> import numpy as np >>> big_end_arr = np.ndarray(shape=(2,),dtype='>i2', buffer=big_end_buffer) >>> big_end_arr[0] -1 +np.int16(1) >>> big_end_arr[1] -770 +np.int16(770) Note the array ``dtype`` above of ``>i2``. The ``>`` means 'big-endian' (``<`` is little-endian) and ``i2`` means 'signed 2-byte integer'. For @@ -99,14 +99,14 @@ We make something where they don't match: >>> wrong_end_dtype_arr = np.ndarray(shape=(2,),dtype='<i2', buffer=big_end_buffer) >>> wrong_end_dtype_arr[0] -256 +np.int16(256) The obvious fix for this situation is to change the dtype so it gives the correct endianness: >>> fixed_end_dtype_arr = wrong_end_dtype_arr.view(np.dtype('<i2').newbyteorder()) >>> fixed_end_dtype_arr[0] -1 +np.int16(1) Note the array has not changed in memory: @@ -122,7 +122,7 @@ that needs a certain byte ordering.
>>> fixed_end_mem_arr = wrong_end_dtype_arr.byteswap() >>> fixed_end_mem_arr[0] -1 +np.int16(1) Now the array *has* changed in memory: @@ -140,7 +140,7 @@ the previous operations: >>> swapped_end_arr = big_end_arr.byteswap() >>> swapped_end_arr = swapped_end_arr.view(swapped_end_arr.dtype.newbyteorder()) >>> swapped_end_arr[0] -1 +np.int16(1) >>> swapped_end_arr.tobytes() == big_end_buffer False @@ -149,7 +149,7 @@ can be achieved with the ndarray astype method: >>> swapped_end_arr = big_end_arr.astype('<i2') >>> swapped_end_arr[0] -1 +np.int16(1) >>> swapped_end_arr.tobytes() == big_end_buffer False diff --git a/doc/source/user/c-info.beyond-basics.rst b/doc/source/user/c-info.beyond-basics.rst index 697c0c045e4f..7bf793ae2e47 100644 --- a/doc/source/user/c-info.beyond-basics.rst +++ b/doc/source/user/c-info.beyond-basics.rst @@ -268,6 +268,9 @@ specifies your data-type. This type number should be stored and made available by your module so that other modules can use it to recognize your data-type. +Note that this API is inherently thread-unsafe. See `thread_safety` for more +details about thread safety in NumPy. + Registering a casting function ------------------------------ diff --git a/doc/source/user/c-info.python-as-glue.rst b/doc/source/user/c-info.python-as-glue.rst index 753a44a0174f..d791341ac560 100644 --- a/doc/source/user/c-info.python-as-glue.rst +++ b/doc/source/user/c-info.python-as-glue.rst @@ -831,7 +831,7 @@ file that defines the interface. Often, however, this ``.i`` file can be parts of the header itself. The interface usually needs a bit of tweaking to be very useful. This ability to parse C/C++ headers and auto-generate the interface still makes SWIG a useful approach to -adding functionalilty from C/C++ into Python, despite the other +adding functionality from C/C++ into Python, despite the other methods that have emerged that are more targeted to Python. SWIG can actually target extensions for several languages, but the typemaps usually have to be language-specific. Nonetheless, with modifications diff --git a/doc/source/user/how-to-io.rst b/doc/source/user/how-to-io.rst index ca4abcd13746..a90fbecfdec4 100644 --- a/doc/source/user/how-to-io.rst +++ b/doc/source/user/how-to-io.rst @@ -343,6 +343,6 @@ storage. >>> import os >>> # list all files created in testsetup. If needed there are - >>> # convenienes in e.g. astroquery to do this more automatically + >>> # conveniences in e.g. astroquery to do this more automatically >>> for filename in ['csv.txt', 'fixedwidth.txt', 'nan.txt', 'skip.txt', 'tabs.txt']: ...
os.remove(filename) diff --git a/doc/source/user/plots/matplotlib1.py b/doc/source/user/plots/matplotlib1.py index 2cbf87ffa2fa..1c3009a93e66 100644 --- a/doc/source/user/plots/matplotlib1.py +++ b/doc/source/user/plots/matplotlib1.py @@ -3,5 +3,5 @@ a = np.array([2, 1, 5, 7, 4, 6, 8, 14, 10, 9, 18, 20, 22]) -plt.plot(a) -plt.show() \ No newline at end of file +plt.plot(a) +plt.show() diff --git a/doc/source/user/plots/matplotlib2.py b/doc/source/user/plots/matplotlib2.py index e15986c2512d..db1d6bda4671 100644 --- a/doc/source/user/plots/matplotlib2.py +++ b/doc/source/user/plots/matplotlib2.py @@ -5,4 +5,4 @@ y = np.linspace(0, 10, 20) plt.plot(x, y, 'purple') # line plt.plot(x, y, 'o') # dots -plt.show() \ No newline at end of file +plt.show() diff --git a/doc/source/user/plots/matplotlib3.py b/doc/source/user/plots/matplotlib3.py index 7b56067ef463..135afe823c08 100644 --- a/doc/source/user/plots/matplotlib3.py +++ b/doc/source/user/plots/matplotlib3.py @@ -11,4 +11,4 @@ ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap='viridis') -plt.show() \ No newline at end of file +plt.show() diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst index 4d418af44ddb..3f97f005898b 100644 --- a/doc/source/user/quickstart.rst +++ b/doc/source/user/quickstart.rst @@ -881,7 +881,7 @@ creates a new array object that looks at the same data. >>> c.flags.owndata False >>> - >>> c = c.reshape((2, 6)) # a's shape doesn't change + >>> c = c.reshape((2, 6)) # a's shape doesn't change, reassigned c is still a view of a >>> a.shape (3, 4) >>> c[0, 4] = 1234 # a's data changes @@ -929,6 +929,8 @@ a small fraction of ``a``, a deep copy should be made when constructing ``b`` wi If ``b = a[:100]`` is used instead, ``a`` is referenced by ``b`` and will persist in memory even if ``del a`` is executed. +See also :ref:`basics.copies-and-views`. 
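+
+A quick way to tell a view from a copy (a small illustrative sketch, not part
+of the surrounding text) is to inspect the ``base`` attribute: a view keeps a
+reference to the array that owns the data, while a deep copy does not::
+
+    >>> a = np.arange(10)
+    >>> b = a[:5]          # slicing returns a view; no data is copied
+    >>> b.base is a
+    True
+    >>> c = a[:5].copy()   # an explicit deep copy owns its data
+    >>> c.base is None
+    True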
+ Functions and methods overview ------------------------------ diff --git a/environment.yml b/environment.yml index 86ee1058f440..46655d750d0d 100644 --- a/environment.yml +++ b/environment.yml @@ -7,17 +7,16 @@ name: numpy-dev channels: - conda-forge dependencies: - - python=3.11 #need to pin to avoid issues with builds + - python=3.12 # need to pin to avoid issues with builds - cython>=3.0 - compilers - openblas - nomkl - - setuptools + - setuptools==65.5.1 - ninja - pkg-config - meson-python - - pip - - spin=0.8 # Unpin when spin 0.9.1 is released + - spin==0.13 - ccache # For testing - pytest @@ -26,7 +25,8 @@ dependencies: - hypothesis # For type annotations - typing_extensions>=4.2.0 # needed for python < 3.10 - - mypy=1.10.0 + - mypy=1.14.1 + - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 - sphinx-copybutton @@ -42,7 +42,7 @@ dependencies: # NOTE: breathe 4.33.0 collides with sphinx.ext.graphviz - breathe>4.33.0 # For linting - - pycodestyle=2.8.0 + - pycodestyle=2.12.1 - gitpython # Used in some tests - cffi diff --git a/meson_options.txt b/meson.options similarity index 100% rename from meson_options.txt rename to meson.options diff --git a/meson_cpu/main_config.h.in b/meson_cpu/main_config.h.in index 0952adf67353..d89e62f5f66b 100644 --- a/meson_cpu/main_config.h.in +++ b/meson_cpu/main_config.h.in @@ -11,7 +11,7 @@ */ #ifndef @P@_CPU_DISPATCHER_CONF_H_ #define @P@_CPU_DISPATCHER_CONF_H_ -/// This definition is required to provides comptablity with NumPy distutils +/// This definition is required to provide compatibility with NumPy distutils #define @P@_CPU_MESON_BUILD /** * @def @P@WITH_CPU_BASELINE @@ -46,7 +46,7 @@ /** * @def @P@WITH_CPU_BASELINE_CALL(EXEC_CB, ...) * Call each enabled baseline feature sorted by lowest interest - * using preprocessor callback without testing whiher the + * using preprocessor callback without testing whether the * feature is supported by CPU or not. 
* * Required for logging purposes only, for example, generating diff --git a/numpy/__config__.py.in b/numpy/__config__.py.in index ce224e49a15d..a62f531c3769 100644 --- a/numpy/__config__.py.in +++ b/numpy/__config__.py.in @@ -7,7 +7,7 @@ from numpy._core._multiarray_umath import ( __cpu_dispatch__, ) -__all__ = ["show"] +__all__ = ["show_config"] _built_with_meson = True @@ -160,3 +160,11 @@ def show(mode=DisplayModes.stdout.value): raise AttributeError( f"Invalid `mode`, use one of: {', '.join([e.value for e in DisplayModes])}" ) + + +def show_config(mode=DisplayModes.stdout.value): + return show(mode) + + +show_config.__doc__ = show.__doc__ +show_config.__module__ = "numpy" diff --git a/numpy/__config__.pyi b/numpy/__config__.pyi new file mode 100644 index 000000000000..bd01228a1cc8 --- /dev/null +++ b/numpy/__config__.pyi @@ -0,0 +1,102 @@ +from enum import Enum +from types import ModuleType +from typing import Final, Literal as L, TypedDict, overload, type_check_only +from typing_extensions import NotRequired + +_CompilerConfigDictValue = TypedDict( + "_CompilerConfigDictValue", + { + "name": str, + "linker": str, + "version": str, + "commands": str, + "args": str, + "linker args": str, + }, +) +_CompilerConfigDict = TypedDict( + "_CompilerConfigDict", + { + "c": _CompilerConfigDictValue, + "cython": _CompilerConfigDictValue, + "c++": _CompilerConfigDictValue, + }, +) +_MachineInformationDict = TypedDict( + "_MachineInformationDict", + { + "host":_MachineInformationDictValue, + "build": _MachineInformationDictValue, + "cross-compiled": NotRequired[L[True]], + }, +) + +@type_check_only +class _MachineInformationDictValue(TypedDict): + cpu: str + family: str + endian: L["little", "big"] + system: str + +_BuildDependenciesDictValue = TypedDict( + "_BuildDependenciesDictValue", + { + "name": str, + "found": NotRequired[L[True]], + "version": str, + "include directory": str, + "lib directory": str, + "openblas configuration": str, + "pc file directory": str, + }, +) + +class _BuildDependenciesDict(TypedDict): + blas: _BuildDependenciesDictValue + lapack: _BuildDependenciesDictValue + +class _PythonInformationDict(TypedDict): + path: str + version: str + +_SIMDExtensionsDict = TypedDict( + "_SIMDExtensionsDict", + { + "baseline": list[str], + "found": list[str], + "not found": list[str], + }, +) + +_ConfigDict = TypedDict( + "_ConfigDict", + { + "Compilers": _CompilerConfigDict, + "Machine Information": _MachineInformationDict, + "Build Dependencies": _BuildDependenciesDict, + "Python Information": _PythonInformationDict, + "SIMD Extensions": _SIMDExtensionsDict, + }, +) + +### + +__all__ = ["show_config"] + +CONFIG: Final[_ConfigDict] = ... + +class DisplayModes(Enum): + stdout = "stdout" + dicts = "dicts" + +def _check_pyyaml() -> ModuleType: ... + +@overload +def show(mode: L["stdout"] = "stdout") -> None: ... +@overload +def show(mode: L["dicts"]) -> _ConfigDict: ... + +@overload +def show_config(mode: L["stdout"] = "stdout") -> None: ... +@overload +def show_config(mode: L["dicts"]) -> _ConfigDict: ... diff --git a/numpy/__init__.cython-30.pxd b/numpy/__init__.cython-30.pxd index 2151a18b1e80..e35cef5fa1a8 100644 --- a/numpy/__init__.cython-30.pxd +++ b/numpy/__init__.cython-30.pxd @@ -151,6 +151,7 @@ cdef extern from "numpy/arrayobject.h": NPY_COMPLEX512 NPY_INTP + NPY_UINTP NPY_DEFAULT_INT # Not a compile time constant (normally)! 
ctypedef enum NPY_ORDER: @@ -757,6 +758,23 @@ cdef extern from "numpy/arrayobject.h": npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) int PyArray_SetBaseObject(ndarray, base) except -1 # NOTE: steals a reference to base! Use "set_array_base()" instead. + # The memory handler functions require the NumPy 1.22 API + # and may require defining NPY_TARGET_VERSION + ctypedef struct PyDataMemAllocator: + void *ctx + void* (*malloc) (void *ctx, size_t size) + void* (*calloc) (void *ctx, size_t nelem, size_t elsize) + void* (*realloc) (void *ctx, void *ptr, size_t new_size) + void (*free) (void *ctx, void *ptr, size_t size) + + ctypedef struct PyDataMem_Handler: + char* name + npy_uint8 version + PyDataMemAllocator allocator + + object PyDataMem_SetHandler(object handler) + object PyDataMem_GetHandler() + # additional datetime related functions are defined below diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd index 8e7583bcb97d..89fe913b9cd3 100644 --- a/numpy/__init__.pxd +++ b/numpy/__init__.pxd @@ -160,6 +160,7 @@ cdef extern from "numpy/arrayobject.h": NPY_COMPLEX512 NPY_INTP + NPY_UINTP NPY_DEFAULT_INT # Not a compile time constant (normally)! ctypedef enum NPY_ORDER: @@ -672,6 +673,23 @@ cdef extern from "numpy/arrayobject.h": npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) int PyArray_SetBaseObject(ndarray, base) except -1 # NOTE: steals a reference to base! Use "set_array_base()" instead. + # The memory handler functions require the NumPy 1.22 API + # and may require defining NPY_TARGET_VERSION + ctypedef struct PyDataMemAllocator: + void *ctx + void* (*malloc) (void *ctx, size_t size) + void* (*calloc) (void *ctx, size_t nelem, size_t elsize) + void* (*realloc) (void *ctx, void *ptr, size_t new_size) + void (*free) (void *ctx, void *ptr, size_t size) + + ctypedef struct PyDataMem_Handler: + char* name + npy_uint8 version + PyDataMemAllocator allocator + + object PyDataMem_SetHandler(object handler) + object PyDataMem_GetHandler() + # additional datetime related functions are defined below diff --git a/numpy/__init__.py b/numpy/__init__.py index 27e5d2d6801d..2a4fd03b6a44 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -111,7 +111,7 @@ from . import _distributor_init try: - from numpy.__config__ import show as show_config + from numpy.__config__ import show_config except ImportError as e: msg = """Error importing numpy: you should not try to import numpy from its source directory; please exit the numpy source tree, and relaunch @@ -120,8 +120,8 @@ from . 
import _core from ._core import ( - False_, ScalarType, True_, _get_promotion_state, _no_nep50_warning, - _set_promotion_state, abs, absolute, acos, acosh, add, all, allclose, + False_, ScalarType, True_, + abs, absolute, acos, acosh, add, all, allclose, amax, amin, any, arange, arccos, arccosh, arcsin, arcsinh, arctan, arctan2, arctanh, argmax, argmin, argpartition, argsort, argwhere, around, array, array2string, array_equal, array_equiv, @@ -144,17 +144,17 @@ frexp, from_dlpack, frombuffer, fromfile, fromfunction, fromiter, frompyfunc, fromstring, full, full_like, gcd, generic, geomspace, get_printoptions, getbufsize, geterr, geterrcall, greater, - greater_equal, half, heaviside, hstack, hypot, identity, iinfo, iinfo, + greater_equal, half, heaviside, hstack, hypot, identity, iinfo, indices, inexact, inf, inner, int16, int32, int64, int8, int_, intc, integer, intp, invert, is_busday, isclose, isdtype, isfinite, isfortran, isinf, isnan, isnat, isscalar, issubdtype, lcm, ldexp, left_shift, less, less_equal, lexsort, linspace, little_endian, log, log10, log1p, log2, logaddexp, logaddexp2, logical_and, logical_not, logical_or, logical_xor, logspace, long, longdouble, longlong, matmul, - matrix_transpose, max, maximum, may_share_memory, mean, memmap, min, - min_scalar_type, minimum, mod, modf, moveaxis, multiply, nan, ndarray, - ndim, nditer, negative, nested_iters, newaxis, nextafter, nonzero, - not_equal, number, object_, ones, ones_like, outer, partition, + matvec, matrix_transpose, max, maximum, may_share_memory, mean, memmap, + min, min_scalar_type, minimum, mod, modf, moveaxis, multiply, nan, + ndarray, ndim, nditer, negative, nested_iters, newaxis, nextafter, + nonzero, not_equal, number, object_, ones, ones_like, outer, partition, permute_dims, pi, positive, pow, power, printoptions, prod, promote_types, ptp, put, putmask, rad2deg, radians, ravel, recarray, reciprocal, record, remainder, repeat, require, reshape, resize, @@ -165,11 +165,11 @@ str_, subtract, sum, swapaxes, take, tan, tanh, tensordot, timedelta64, trace, transpose, true_divide, trunc, typecodes, ubyte, ufunc, uint, uint16, uint32, uint64, uint8, uintc, uintp, ulong, - ulonglong, unsignedinteger, unstack, ushort, var, vdot, vecdot, void, - vstack, where, zeros, zeros_like + ulonglong, unsignedinteger, unstack, ushort, var, vdot, vecdot, + vecmat, void, vstack, where, zeros, zeros_like ) - # NOTE: It's still under discussion whether these aliases + # NOTE: It's still under discussion whether these aliases # should be removed. 
for ta in ["float96", "float128", "complex192", "complex256"]: try: @@ -184,12 +184,12 @@ histogram, histogram_bin_edges, histogramdd ) from .lib._nanfunctions_impl import ( - nanargmax, nanargmin, nancumprod, nancumsum, nanmax, nanmean, + nanargmax, nanargmin, nancumprod, nancumsum, nanmax, nanmean, nanmedian, nanmin, nanpercentile, nanprod, nanquantile, nanstd, nansum, nanvar ) from .lib._function_base_impl import ( - select, piecewise, trim_zeros, copy, iterable, percentile, diff, + select, piecewise, trim_zeros, copy, iterable, percentile, diff, gradient, angle, unwrap, sort_complex, flip, rot90, extract, place, vectorize, asarray_chkfinite, average, bincount, digitize, cov, corrcoef, median, sinc, hamming, hanning, bartlett, blackman, @@ -197,8 +197,8 @@ interp, quantile ) from .lib._twodim_base_impl import ( - diag, diagflat, eye, fliplr, flipud, tri, triu, tril, vander, - histogram2d, mask_indices, tril_indices, tril_indices_from, + diag, diagflat, eye, fliplr, flipud, tri, triu, tril, vander, + histogram2d, mask_indices, tril_indices, tril_indices_from, triu_indices, triu_indices_from ) from .lib._shape_base_impl import ( @@ -207,7 +207,7 @@ take_along_axis, tile, vsplit ) from .lib._type_check_impl import ( - iscomplexobj, isrealobj, imag, iscomplex, isreal, nan_to_num, real, + iscomplexobj, isrealobj, imag, iscomplex, isreal, nan_to_num, real, real_if_close, typename, mintypecode, common_type ) from .lib._arraysetops_impl import ( @@ -232,7 +232,7 @@ ) from .lib._index_tricks_impl import ( diag_indices_from, diag_indices, fill_diagonal, ndindex, ndenumerate, - ix_, c_, r_, s_, ogrid, mgrid, unravel_index, ravel_multi_index, + ix_, c_, r_, s_, ogrid, mgrid, unravel_index, ravel_multi_index, index_exp ) @@ -246,7 +246,7 @@ # (experimental label) are not added here, because `from numpy import *` # must not raise any warnings - that's too disruptive. __numpy_submodules__ = { - "linalg", "fft", "dtypes", "random", "polynomial", "ma", + "linalg", "fft", "dtypes", "random", "polynomial", "ma", "exceptions", "lib", "ctypeslib", "testing", "typing", "f2py", "test", "rec", "char", "core", "strings", } @@ -395,7 +395,7 @@ def __getattr__(attr): if attr in __former_attrs__: raise AttributeError(__former_attrs__[attr], name=None) - + if attr in __expired_attributes__: raise AttributeError( f"`np.{attr}` was removed in the NumPy 2.0 release. " @@ -419,7 +419,7 @@ def __dir__(): globals().keys() | __numpy_submodules__ ) public_symbols -= { - "matrixlib", "matlib", "tests", "conftest", "version", + "matrixlib", "matlib", "tests", "conftest", "version", "compat", "distutils", "array_api" } return list(public_symbols) @@ -443,7 +443,7 @@ def _sanity_check(): try: x = ones(2, dtype=float32) if not abs(x.dot(x) - float32(2.0)) < 1e-5: - raise AssertionError() + raise AssertionError except AssertionError: msg = ("The current Numpy installation ({!r}) fails to " "pass simple sanity checks. This can be caused for example " @@ -477,7 +477,9 @@ def _mac_os_check(): for _wn in w: if _wn.category is exceptions.RankWarning: # Ignore other warnings, they may not be relevant (see gh-25433). - error_message = f"{_wn.category.__name__}: {str(_wn.message)}" + error_message = ( + f"{_wn.category.__name__}: {_wn.message}" + ) msg = ( "Polyfit sanity test emitted a warning, most likely due " "to using a buggy Accelerate backend." @@ -493,7 +495,7 @@ def _mac_os_check(): def hugepage_setup(): """ We usually use madvise hugepages support, but on some old kernels it - is slow and thus better avoided. 
Specifically kernel version 4.6 + is slow and thus better avoided. Specifically kernel version 4.6 had a bug fix which probably fixed this: https://github.com/torvalds/linux/commit/7cf91a98e607c2f935dbcc177d70011e95b8faff """ @@ -502,7 +504,7 @@ def hugepage_setup(): # If there is an issue with parsing the kernel version, # set use_hugepage to 0. Usage of LooseVersion will handle # the kernel version parsing better, but avoided since it - # will increase the import time. + # will increase the import time. # See: #16679 for related discussion. try: use_hugepage = 1 @@ -529,8 +531,11 @@ def hugepage_setup(): _core.multiarray._multiarray_umath._reload_guard() # TODO: Remove the environment variable entirely now that it is "weak" - _core._set_promotion_state( - os.environ.get("NPY_PROMOTION_STATE", "weak")) + if (os.environ.get("NPY_PROMOTION_STATE", "weak") != "weak"): + warnings.warn( + "NPY_PROMOTION_STATE was a temporary feature for NumPy 2.0 " + "transition and is ignored after NumPy 2.2.", + UserWarning, stacklevel=2) # Tell PyInstaller where to find hook-numpy.py def _pyinstaller_hooks_dir(): diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index e73d6f16765b..1a2d6a08bbb1 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1,16 +1,18 @@ +# ruff: noqa: I001 import builtins import sys -import os import mmap import ctypes as ct import array as _array import datetime as dt -import enum from abc import abstractmethod -from types import TracebackType, MappingProxyType, GenericAlias -from contextlib import contextmanager +from types import EllipsisType, ModuleType, TracebackType, MappingProxyType, GenericAlias +from decimal import Decimal +from fractions import Fraction +from uuid import UUID import numpy as np +from numpy.__config__ import show as show_config from numpy._pytesttester import PytestTester from numpy._core._internal import _ctypes @@ -18,30 +20,27 @@ from numpy._typing import ( # Arrays ArrayLike, NDArray, - _ArrayLike, _SupportsArray, _NestedSequence, _FiniteNestedSequence, - _SupportsArray, + _ArrayLike, _ArrayLikeBool_co, _ArrayLikeUInt_co, + _ArrayLikeInt, _ArrayLikeInt_co, + _ArrayLikeFloat64_co, _ArrayLikeFloat_co, + _ArrayLikeComplex128_co, _ArrayLikeComplex_co, _ArrayLikeNumber_co, _ArrayLikeTD64_co, _ArrayLikeDT64_co, _ArrayLikeObject_co, - _ArrayLikeStr_co, - _ArrayLikeBytes_co, - _ArrayLikeUnknown, - _UnknownType, # DTypes DTypeLike, _DTypeLike, _DTypeLikeVoid, - _SupportsDType, _VoidDTypeLike, # Shapes @@ -72,7 +71,6 @@ from numpy._typing import ( _NBitShort, _NBitIntC, _NBitIntP, - _NBitInt, _NBitLong, _NBitLongLong, _NBitHalf, @@ -120,6 +118,18 @@ from numpy._typing import ( _BytesCodes, _VoidCodes, _ObjectCodes, + _StringCodes, + + _UnsignedIntegerCodes, + _SignedIntegerCodes, + _IntegerCodes, + _FloatingCodes, + _ComplexFloatingCodes, + _InexactCodes, + _NumberCodes, + _CharacterCodes, + _FlexibleCodes, + _GenericCodes, # Ufuncs _UFunc_Nin1_Nout1, @@ -136,7 +146,6 @@ from numpy._typing._callable import ( _BoolTrueDiv, _BoolMod, _BoolDivMod, - _TD64Div, _IntTrueDiv, _UnsignedIntOp, _UnsignedIntBitOp, @@ -149,7 +158,6 @@ from numpy._typing._callable import ( _FloatOp, _FloatMod, _FloatDivMod, - _ComplexOp, _NumberOp, _ComparisonOpLT, _ComparisonOpLE, @@ -160,21 +168,21 @@ from numpy._typing._callable import ( # NOTE: Numpy's mypy plugin is used for removing the types unavailable # to the specific platform from numpy._typing._extended_precision import ( - uint128 as uint128, - uint256 as uint256, - int128 as int128, - int256 as int256, - float80 as 
float80, - float96 as float96, - float128 as float128, - float256 as float256, - complex160 as complex160, - complex192 as complex192, - complex256 as complex256, - complex512 as complex512, + uint128, + uint256, + int128, + int256, + float80, + float96, + float128, + float256, + complex160, + complex192, + complex256, + complex512, ) -from numpy._array_api_info import __array_namespace_info__ as __array_namespace_info__ +from numpy._array_api_info import __array_namespace_info__ from collections.abc import ( Callable, @@ -183,485 +191,709 @@ from collections.abc import ( Mapping, Sequence, ) + +if sys.version_info >= (3, 12): + from collections.abc import Buffer as _SupportsBuffer +else: + _SupportsBuffer: TypeAlias = ( + bytes + | bytearray + | memoryview + | _array.array[Any] + | mmap.mmap + | NDArray[Any] + | generic + ) + from typing import ( - TYPE_CHECKING, - Literal as L, Any, - Generator, + ClassVar, + Final, Generic, + Literal as L, NoReturn, - overload, SupportsComplex, SupportsFloat, SupportsInt, - TypeVar, - Protocol, SupportsIndex, - Final, - final, - ClassVar, TypeAlias, + TypedDict, + final, + type_check_only, ) -if sys.version_info >= (3, 11): - from typing import LiteralString -elif TYPE_CHECKING: - from typing_extensions import LiteralString -else: - LiteralString: TypeAlias = str +# NOTE: `typing_extensions` and `_typeshed` are always available in `.pyi` stubs, even +# if not available at runtime. This is because the `typeshed` stubs for the standard +# library include `typing_extensions` stubs: +# https://github.com/python/typeshed/blob/main/stdlib/typing_extensions.pyi +from _typeshed import StrOrBytesPath, SupportsFlush, SupportsLenAndGetItem, SupportsWrite +from typing_extensions import CapsuleType, LiteralString, Never, Protocol, Self, TypeVar, Unpack, deprecated, overload -# Ensures that the stubs are picked up from numpy import ( - ctypeslib as ctypeslib, - exceptions as exceptions, - fft as fft, - lib as lib, - linalg as linalg, - ma as ma, - polynomial as polynomial, - random as random, - testing as testing, - version as version, - exceptions as exceptions, - dtypes as dtypes, - rec as rec, - char as char, - strings as strings, + char, + core, + ctypeslib, + dtypes, + exceptions, + f2py, + fft, + lib, + linalg, + ma, + polynomial, + random, + rec, + strings, + testing, + typing, ) -from numpy._core.records import ( - record as record, - recarray as recarray, +# available through `__getattr__`, but not in `__all__` or `__dir__` +from numpy import ( + __config__ as __config__, + matlib as matlib, + matrixlib as matrixlib, + version as version, ) +if sys.version_info < (3, 12): + from numpy import distutils as distutils -from numpy._core.defchararray import ( - chararray as chararray, +from numpy._core.records import ( + record, + recarray, ) from numpy._core.function_base import ( - linspace as linspace, - logspace as logspace, - geomspace as geomspace, + linspace, + logspace, + geomspace, ) from numpy._core.fromnumeric import ( - take as take, - reshape as reshape, - choose as choose, - repeat as repeat, - put as put, - swapaxes as swapaxes, - transpose as transpose, - matrix_transpose as matrix_transpose, - partition as partition, - argpartition as argpartition, - sort as sort, - argsort as argsort, - argmax as argmax, - argmin as argmin, - searchsorted as searchsorted, - resize as resize, - squeeze as squeeze, - diagonal as diagonal, - trace as trace, - ravel as ravel, - nonzero as nonzero, - shape as shape, - compress as compress, - clip as clip, - sum as 
sum, - all as all, - any as any, - cumsum as cumsum, - cumulative_sum as cumulative_sum, - ptp as ptp, - max as max, - min as min, - amax as amax, - amin as amin, - prod as prod, - cumprod as cumprod, - cumulative_prod as cumulative_prod, - ndim as ndim, - size as size, - around as around, - round as round, - mean as mean, - std as std, - var as var, + take, + reshape, + choose, + repeat, + put, + swapaxes, + transpose, + matrix_transpose, + partition, + argpartition, + sort, + argsort, + argmax, + argmin, + searchsorted, + resize, + squeeze, + diagonal, + trace, + ravel, + nonzero, + shape, + compress, + clip, + sum, + all, + any, + cumsum, + cumulative_sum, + ptp, + max, + min, + amax, + amin, + prod, + cumprod, + cumulative_prod, + ndim, + size, + around, + round, + mean, + std, + var, ) from numpy._core._asarray import ( - require as require, + require, ) from numpy._core._type_aliases import ( - sctypeDict as sctypeDict, + sctypeDict, ) from numpy._core._ufunc_config import ( - seterr as seterr, - geterr as geterr, - setbufsize as setbufsize, - getbufsize as getbufsize, - seterrcall as seterrcall, - geterrcall as geterrcall, + seterr, + geterr, + setbufsize, + getbufsize, + seterrcall, + geterrcall, _ErrKind, - _ErrFunc, + _ErrCall, ) from numpy._core.arrayprint import ( - set_printoptions as set_printoptions, - get_printoptions as get_printoptions, - array2string as array2string, - format_float_scientific as format_float_scientific, - format_float_positional as format_float_positional, - array_repr as array_repr, - array_str as array_str, - printoptions as printoptions, + set_printoptions, + get_printoptions, + array2string, + format_float_scientific, + format_float_positional, + array_repr, + array_str, + printoptions, ) from numpy._core.einsumfunc import ( - einsum as einsum, - einsum_path as einsum_path, + einsum, + einsum_path, ) from numpy._core.multiarray import ( - array as array, - empty_like as empty_like, - empty as empty, - zeros as zeros, - concatenate as concatenate, - inner as inner, - where as where, - lexsort as lexsort, - can_cast as can_cast, - min_scalar_type as min_scalar_type, - result_type as result_type, - dot as dot, - vdot as vdot, - bincount as bincount, - copyto as copyto, - putmask as putmask, - packbits as packbits, - unpackbits as unpackbits, - shares_memory as shares_memory, - may_share_memory as may_share_memory, - asarray as asarray, - asanyarray as asanyarray, - ascontiguousarray as ascontiguousarray, - asfortranarray as asfortranarray, - arange as arange, - busday_count as busday_count, - busday_offset as busday_offset, - datetime_as_string as datetime_as_string, - datetime_data as datetime_data, - frombuffer as frombuffer, - fromfile as fromfile, - fromiter as fromiter, - is_busday as is_busday, - promote_types as promote_types, - fromstring as fromstring, - frompyfunc as frompyfunc, - nested_iters as nested_iters, + array, + empty_like, + empty, + zeros, + concatenate, + inner, + where, + lexsort, + can_cast, + min_scalar_type, + result_type, + dot, + vdot, + bincount, + copyto, + putmask, + packbits, + unpackbits, + shares_memory, + may_share_memory, + asarray, + asanyarray, + ascontiguousarray, + asfortranarray, + arange, + busday_count, + busday_offset, + datetime_as_string, + datetime_data, + frombuffer, + fromfile, + fromiter, + is_busday, + promote_types, + fromstring, + frompyfunc, + nested_iters, flagsobj, ) from numpy._core.numeric import ( - zeros_like as zeros_like, - ones as ones, - ones_like as ones_like, - full as full, - full_like 
as full_like, - count_nonzero as count_nonzero, - isfortran as isfortran, - argwhere as argwhere, - flatnonzero as flatnonzero, - correlate as correlate, - convolve as convolve, - outer as outer, - tensordot as tensordot, - roll as roll, - rollaxis as rollaxis, - moveaxis as moveaxis, - cross as cross, - indices as indices, - fromfunction as fromfunction, - isscalar as isscalar, - binary_repr as binary_repr, - base_repr as base_repr, - identity as identity, - allclose as allclose, - isclose as isclose, - array_equal as array_equal, - array_equiv as array_equiv, - astype as astype, + zeros_like, + ones, + ones_like, + full, + full_like, + count_nonzero, + isfortran, + argwhere, + flatnonzero, + correlate, + convolve, + outer, + tensordot, + roll, + rollaxis, + moveaxis, + cross, + indices, + fromfunction, + isscalar, + binary_repr, + base_repr, + identity, + allclose, + isclose, + array_equal, + array_equiv, + astype, ) from numpy._core.numerictypes import ( - isdtype as isdtype, - issubdtype as issubdtype, - ScalarType as ScalarType, - typecodes as typecodes, + isdtype, + issubdtype, + ScalarType, + typecodes, ) from numpy._core.shape_base import ( - atleast_1d as atleast_1d, - atleast_2d as atleast_2d, - atleast_3d as atleast_3d, - block as block, - hstack as hstack, - stack as stack, - vstack as vstack, - unstack as unstack, + atleast_1d, + atleast_2d, + atleast_3d, + block, + hstack, + stack, + vstack, + unstack, ) +from ._expired_attrs_2_0 import __expired_attributes__ as __expired_attributes__ + from numpy.lib import ( scimath as emath, ) from numpy.lib._arraypad_impl import ( - pad as pad, + pad, ) from numpy.lib._arraysetops_impl import ( - ediff1d as ediff1d, - intersect1d as intersect1d, - isin as isin, - setdiff1d as setdiff1d, - setxor1d as setxor1d, - union1d as union1d, - unique as unique, - unique_all as unique_all, - unique_counts as unique_counts, - unique_inverse as unique_inverse, - unique_values as unique_values, + ediff1d, + in1d, + intersect1d, + isin, + setdiff1d, + setxor1d, + union1d, + unique, + unique_all, + unique_counts, + unique_inverse, + unique_values, ) from numpy.lib._function_base_impl import ( - select as select, - piecewise as piecewise, - trim_zeros as trim_zeros, - copy as copy, - iterable as iterable, - percentile as percentile, - diff as diff, - gradient as gradient, - angle as angle, - unwrap as unwrap, - sort_complex as sort_complex, - flip as flip, - rot90 as rot90, - extract as extract, - place as place, - asarray_chkfinite as asarray_chkfinite, - average as average, - bincount as bincount, - digitize as digitize, - cov as cov, - corrcoef as corrcoef, - median as median, - sinc as sinc, - hamming as hamming, - hanning as hanning, - bartlett as bartlett, - blackman as blackman, - kaiser as kaiser, - i0 as i0, - meshgrid as meshgrid, - delete as delete, - insert as insert, - append as append, - interp as interp, - quantile as quantile, - trapezoid as trapezoid, + select, + piecewise, + trim_zeros, + copy, + iterable, + percentile, + diff, + gradient, + angle, + unwrap, + sort_complex, + flip, + rot90, + extract, + place, + asarray_chkfinite, + average, + bincount, + digitize, + cov, + corrcoef, + median, + sinc, + hamming, + hanning, + bartlett, + blackman, + kaiser, + trapezoid, + trapz, + i0, + meshgrid, + delete, + insert, + append, + interp, + quantile, ) +from numpy._globals import _CopyMode + from numpy.lib._histograms_impl import ( - histogram_bin_edges as histogram_bin_edges, - histogram as histogram, - histogramdd as histogramdd, + 
histogram_bin_edges, + histogram, + histogramdd, ) from numpy.lib._index_tricks_impl import ( - ravel_multi_index as ravel_multi_index, - unravel_index as unravel_index, - mgrid as mgrid, - ogrid as ogrid, - r_ as r_, - c_ as c_, - s_ as s_, - index_exp as index_exp, - ix_ as ix_, - fill_diagonal as fill_diagonal, - diag_indices as diag_indices, - diag_indices_from as diag_indices_from, + ravel_multi_index, + unravel_index, + mgrid, + ogrid, + r_, + c_, + s_, + index_exp, + ix_, + fill_diagonal, + diag_indices, + diag_indices_from, ) from numpy.lib._nanfunctions_impl import ( - nansum as nansum, - nanmax as nanmax, - nanmin as nanmin, - nanargmax as nanargmax, - nanargmin as nanargmin, - nanmean as nanmean, - nanmedian as nanmedian, - nanpercentile as nanpercentile, - nanvar as nanvar, - nanstd as nanstd, - nanprod as nanprod, - nancumsum as nancumsum, - nancumprod as nancumprod, - nanquantile as nanquantile, + nansum, + nanmax, + nanmin, + nanargmax, + nanargmin, + nanmean, + nanmedian, + nanpercentile, + nanvar, + nanstd, + nanprod, + nancumsum, + nancumprod, + nanquantile, ) from numpy.lib._npyio_impl import ( - savetxt as savetxt, - loadtxt as loadtxt, - genfromtxt as genfromtxt, - load as load, - save as save, - savez as savez, - savez_compressed as savez_compressed, - packbits as packbits, - unpackbits as unpackbits, - fromregex as fromregex, + savetxt, + loadtxt, + genfromtxt, + load, + save, + savez, + savez_compressed, + packbits, + unpackbits, + fromregex, ) from numpy.lib._polynomial_impl import ( - poly as poly, - roots as roots, - polyint as polyint, - polyder as polyder, - polyadd as polyadd, - polysub as polysub, - polymul as polymul, - polydiv as polydiv, - polyval as polyval, - polyfit as polyfit, + poly, + roots, + polyint, + polyder, + polyadd, + polysub, + polymul, + polydiv, + polyval, + polyfit, ) from numpy.lib._shape_base_impl import ( - column_stack as column_stack, - dstack as dstack, - array_split as array_split, - split as split, - hsplit as hsplit, - vsplit as vsplit, - dsplit as dsplit, - apply_over_axes as apply_over_axes, - expand_dims as expand_dims, - apply_along_axis as apply_along_axis, - kron as kron, - tile as tile, - take_along_axis as take_along_axis, - put_along_axis as put_along_axis, + column_stack, + dstack, + array_split, + split, + hsplit, + vsplit, + dsplit, + apply_over_axes, + expand_dims, + apply_along_axis, + kron, + tile, + take_along_axis, + put_along_axis, ) from numpy.lib._stride_tricks_impl import ( - broadcast_to as broadcast_to, - broadcast_arrays as broadcast_arrays, - broadcast_shapes as broadcast_shapes, + broadcast_to, + broadcast_arrays, + broadcast_shapes, ) from numpy.lib._twodim_base_impl import ( - diag as diag, - diagflat as diagflat, - eye as eye, - fliplr as fliplr, - flipud as flipud, - tri as tri, - triu as triu, - tril as tril, - vander as vander, - histogram2d as histogram2d, - mask_indices as mask_indices, - tril_indices as tril_indices, - tril_indices_from as tril_indices_from, - triu_indices as triu_indices, - triu_indices_from as triu_indices_from, + diag, + diagflat, + eye, + fliplr, + flipud, + tri, + triu, + tril, + vander, + histogram2d, + mask_indices, + tril_indices, + tril_indices_from, + triu_indices, + triu_indices_from, ) from numpy.lib._type_check_impl import ( - mintypecode as mintypecode, - real as real, - imag as imag, - iscomplex as iscomplex, - isreal as isreal, - iscomplexobj as iscomplexobj, - isrealobj as isrealobj, - nan_to_num as nan_to_num, - real_if_close as real_if_close, - typename as 
typename, - common_type as common_type, + mintypecode, + real, + imag, + iscomplex, + isreal, + iscomplexobj, + isrealobj, + nan_to_num, + real_if_close, + typename, + common_type, ) from numpy.lib._ufunclike_impl import ( - fix as fix, - isposinf as isposinf, - isneginf as isneginf, + fix, + isposinf, + isneginf, ) from numpy.lib._utils_impl import ( - get_include as get_include, - info as info, - show_runtime as show_runtime, + get_include, + info, + show_runtime, ) from numpy.matrixlib import ( - asmatrix as asmatrix, - bmat as bmat, + asmatrix, + bmat, ) -_AnyStr_contra = TypeVar("_AnyStr_contra", LiteralString, builtins.str, bytes, contravariant=True) - -# Protocol for representing file-like-objects accepted -# by `ndarray.tofile` and `fromfile` -class _IOProtocol(Protocol): - def flush(self) -> object: ... - def fileno(self) -> int: ... - def tell(self) -> SupportsIndex: ... - def seek(self, offset: int, whence: int, /) -> object: ... +__all__ = [ # noqa: RUF022 + # __numpy_submodules__ + "char", "core", "ctypeslib", "dtypes", "exceptions", "f2py", "fft", "lib", "linalg", + "ma", "polynomial", "random", "rec", "strings", "test", "testing", "typing", + + # _core.__all__ + "abs", "acos", "acosh", "asin", "asinh", "atan", "atanh", "atan2", "bitwise_invert", + "bitwise_left_shift", "bitwise_right_shift", "concat", "pow", "permute_dims", + "memmap", "sctypeDict", "record", "recarray", + + # _core.numeric.__all__ + "newaxis", "ndarray", "flatiter", "nditer", "nested_iters", "ufunc", "arange", + "array", "asarray", "asanyarray", "ascontiguousarray", "asfortranarray", "zeros", + "count_nonzero", "empty", "broadcast", "dtype", "fromstring", "fromfile", + "frombuffer", "from_dlpack", "where", "argwhere", "copyto", "concatenate", + "lexsort", "astype", "can_cast", "promote_types", "min_scalar_type", "result_type", + "isfortran", "empty_like", "zeros_like", "ones_like", "correlate", "convolve", + "inner", "dot", "outer", "vdot", "roll", "rollaxis", "moveaxis", "cross", + "tensordot", "little_endian", "fromiter", "array_equal", "array_equiv", "indices", + "fromfunction", "isclose", "isscalar", "binary_repr", "base_repr", "ones", + "identity", "allclose", "putmask", "flatnonzero", "inf", "nan", "False_", "True_", + "bitwise_not", "full", "full_like", "matmul", "vecdot", "vecmat", + "shares_memory", "may_share_memory", + "all", "amax", "amin", "any", "argmax", "argmin", "argpartition", "argsort", + "around", "choose", "clip", "compress", "cumprod", "cumsum", "cumulative_prod", + "cumulative_sum", "diagonal", "mean", "max", "min", "matrix_transpose", "ndim", + "nonzero", "partition", "prod", "ptp", "put", "ravel", "repeat", "reshape", + "resize", "round", "searchsorted", "shape", "size", "sort", "squeeze", "std", "sum", + "swapaxes", "take", "trace", "transpose", "var", + "absolute", "add", "arccos", "arccosh", "arcsin", "arcsinh", "arctan", "arctan2", + "arctanh", "bitwise_and", "bitwise_or", "bitwise_xor", "cbrt", "ceil", "conj", + "conjugate", "copysign", "cos", "cosh", "bitwise_count", "deg2rad", "degrees", + "divide", "divmod", "e", "equal", "euler_gamma", "exp", "exp2", "expm1", "fabs", + "floor", "floor_divide", "float_power", "fmax", "fmin", "fmod", "frexp", + "frompyfunc", "gcd", "greater", "greater_equal", "heaviside", "hypot", "invert", + "isfinite", "isinf", "isnan", "isnat", "lcm", "ldexp", "left_shift", "less", + "less_equal", "log", "log10", "log1p", "log2", "logaddexp", "logaddexp2", + "logical_and", "logical_not", "logical_or", "logical_xor", "matvec", "maximum", "minimum", + "mod", 
"modf", "multiply", "negative", "nextafter", "not_equal", "pi", "positive", + "power", "rad2deg", "radians", "reciprocal", "remainder", "right_shift", "rint", + "sign", "signbit", "sin", "sinh", "spacing", "sqrt", "square", "subtract", "tan", + "tanh", "true_divide", "trunc", "ScalarType", "typecodes", "issubdtype", + "datetime_data", "datetime_as_string", "busday_offset", "busday_count", "is_busday", + "busdaycalendar", "isdtype", + "complexfloating", "character", "unsignedinteger", "inexact", "generic", "floating", + "integer", "signedinteger", "number", "flexible", "bool", "float16", "float32", + "float64", "longdouble", "complex64", "complex128", "clongdouble", + "bytes_", "str_", "void", "object_", "datetime64", "timedelta64", "int8", "byte", + "uint8", "ubyte", "int16", "short", "uint16", "ushort", "int32", "intc", "uint32", + "uintc", "int64", "long", "uint64", "ulong", "longlong", "ulonglong", "intp", + "uintp", "double", "cdouble", "single", "csingle", "half", "bool_", "int_", "uint", + "uint128", "uint256", "int128", "int256", "float80", "float96", "float128", + "float256", "complex160", "complex192", "complex256", "complex512", + "array2string", "array_str", "array_repr", "set_printoptions", "get_printoptions", + "printoptions", "format_float_positional", "format_float_scientific", "require", + "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall", + "errstate", + # _core.function_base.__all__ + "logspace", "linspace", "geomspace", + # _core.getlimits.__all__ + "finfo", "iinfo", + # _core.shape_base.__all__ + "atleast_1d", "atleast_2d", "atleast_3d", "block", "hstack", "stack", "unstack", + "vstack", + # _core.einsumfunc.__all__ + "einsum", "einsum_path", + # matrixlib.__all__ + "matrix", "bmat", "asmatrix", + # lib._histograms_impl.__all__ + "histogram", "histogramdd", "histogram_bin_edges", + # lib._nanfunctions_impl.__all__ + "nansum", "nanmax", "nanmin", "nanargmax", "nanargmin", "nanmean", "nanmedian", + "nanpercentile", "nanvar", "nanstd", "nanprod", "nancumsum", "nancumprod", + "nanquantile", + # lib._function_base_impl.__all__ + "select", "piecewise", "trim_zeros", "copy", "iterable", "percentile", "diff", + "gradient", "angle", "unwrap", "sort_complex", "flip", "rot90", "extract", "place", + "vectorize", "asarray_chkfinite", "average", "bincount", "digitize", "cov", + "corrcoef", "median", "sinc", "hamming", "hanning", "bartlett", "blackman", + "kaiser", "trapezoid", "trapz", "i0", "meshgrid", "delete", "insert", "append", + "interp", "quantile", + # lib._twodim_base_impl.__all__ + "diag", "diagflat", "eye", "fliplr", "flipud", "tri", "triu", "tril", "vander", + "histogram2d", "mask_indices", "tril_indices", "tril_indices_from", "triu_indices", + "triu_indices_from", + # lib._shape_base_impl.__all__ + # NOTE: `row_stack` is omitted because it is deprecated + "column_stack", "dstack", "array_split", "split", "hsplit", "vsplit", "dsplit", + "apply_over_axes", "expand_dims", "apply_along_axis", "kron", "tile", + "take_along_axis", "put_along_axis", + # lib._type_check_impl.__all__ + "iscomplexobj", "isrealobj", "imag", "iscomplex", "isreal", "nan_to_num", "real", + "real_if_close", "typename", "mintypecode", "common_type", + # lib._arraysetops_impl.__all__ + "ediff1d", "in1d", "intersect1d", "isin", "setdiff1d", "setxor1d", "union1d", + "unique", "unique_all", "unique_counts", "unique_inverse", "unique_values", + # lib._ufunclike_impl.__all__ + "fix", "isneginf", "isposinf", + # lib._arraypad_impl.__all__ + "pad", + # lib._utils_impl.__all__ + 
"get_include", "info", "show_runtime", + # lib._stride_tricks_impl.__all__ + "broadcast_to", "broadcast_arrays", "broadcast_shapes", + # lib._polynomial_impl.__all__ + "poly", "roots", "polyint", "polyder", "polyadd", "polysub", "polymul", "polydiv", + "polyval", "poly1d", "polyfit", + # lib._npyio_impl.__all__ + "savetxt", "loadtxt", "genfromtxt", "load", "save", "savez", "savez_compressed", + "packbits", "unpackbits", "fromregex", + # lib._index_tricks_impl.__all__ + "ravel_multi_index", "unravel_index", "mgrid", "ogrid", "r_", "c_", "s_", + "index_exp", "ix_", "ndenumerate", "ndindex", "fill_diagonal", "diag_indices", + "diag_indices_from", + + # __init__.__all__ + "emath", "show_config", "__version__", "__array_namespace_info__", +] # fmt: skip + +### Constrained types (for internal use only) +# Only use these for functions; never as generic type parameter. + +_AnyStr = TypeVar("_AnyStr", LiteralString, str, bytes) +_AnyShapeType = TypeVar( + "_AnyShapeType", + tuple[()], # 0-d + tuple[int], # 1-d + tuple[int, int], # 2-d + tuple[int, int, int], # 3-d + tuple[int, int, int, int], # 4-d + tuple[int, int, int, int, int], # 5-d + tuple[int, int, int, int, int, int], # 6-d + tuple[int, int, int, int, int, int, int], # 7-d + tuple[int, int, int, int, int, int, int, int], # 8-d + tuple[int, ...], # N-d +) +_AnyNBitInexact = TypeVar("_AnyNBitInexact", _NBitHalf, _NBitSingle, _NBitDouble, _NBitLongDouble) +_AnyTD64Item = TypeVar("_AnyTD64Item", dt.timedelta, int, None, dt.timedelta | int | None) +_AnyDT64Arg = TypeVar("_AnyDT64Arg", dt.datetime, dt.date, None) +_AnyDT64Item = TypeVar("_AnyDT64Item", dt.datetime, dt.date, int, None, dt.date, int | None) +_AnyDate = TypeVar("_AnyDate", dt.date, dt.datetime) +_AnyDateOrTime = TypeVar("_AnyDateOrTime", dt.date, dt.datetime, dt.timedelta) -# NOTE: `seek`, `write` and `flush` are technically only required -# for `readwrite`/`write` modes -class _MemMapIOProtocol(Protocol): - def flush(self) -> object: ... - def fileno(self) -> SupportsIndex: ... - def tell(self) -> int: ... - def seek(self, offset: int, whence: int, /) -> object: ... - def write(self, s: bytes, /) -> object: ... - @property - def read(self) -> object: ... +### Type parameters (for internal use only) -class _SupportsWrite(Protocol[_AnyStr_contra]): - def write(self, s: _AnyStr_contra, /) -> object: ... +_T = TypeVar("_T") +_T_co = TypeVar("_T_co", covariant=True) +_T_contra = TypeVar("_T_contra", contravariant=True) +_RealT_co = TypeVar("_RealT_co", covariant=True) +_ImagT_co = TypeVar("_ImagT_co", covariant=True) -__all__: list[str] -def __dir__() -> Sequence[str]: ... +_CallableT = TypeVar("_CallableT", bound=Callable[..., object]) -__version__: LiteralString -__array_api_version__: LiteralString -test: PytestTester +_DType = TypeVar("_DType", bound=dtype[Any]) +_DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True) +_FlexDType = TypeVar("_FlexDType", bound=dtype[flexible]) -# TODO: Move placeholders to their respective module once -# their annotations are properly implemented -# -# Placeholders for classes +_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) +_ArrayT_co = TypeVar("_ArrayT_co", bound=NDArray[Any], covariant=True) +_IntegralArrayT = TypeVar("_IntegralArrayT", bound=NDArray[integer[Any] | np.bool | object_]) +_RealArrayT = TypeVar("_RealArrayT", bound=NDArray[floating[Any] | integer[Any] | timedelta64 | np.bool | object_]) +_NumericArrayT = TypeVar("_NumericArrayT", bound=NDArray[number[Any] | timedelta64 | object_]) -def show_config() -> None: ... 
+_ShapeT = TypeVar("_ShapeT", bound=_Shape) +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, covariant=True) +_1DShapeT = TypeVar("_1DShapeT", bound=_1D) +_2DShapeT_co = TypeVar("_2DShapeT_co", bound=_2D, covariant=True) +_1NShapeT = TypeVar("_1NShapeT", bound=tuple[L[1], Unpack[tuple[L[1], ...]]]) # (1,) | (1, 1) | (1, 1, 1) | ... -_NdArraySubClass = TypeVar("_NdArraySubClass", bound=NDArray[Any]) -_NdArraySubClass_co = TypeVar("_NdArraySubClass_co", bound=NDArray[Any], covariant=True) -_DTypeScalar_co = TypeVar("_DTypeScalar_co", covariant=True, bound=generic) _SCT = TypeVar("_SCT", bound=generic) +_SCT_co = TypeVar("_SCT_co", bound=generic, covariant=True) +_NumberT = TypeVar("_NumberT", bound=number[Any]) +_RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) +_FloatingT_co = TypeVar("_FloatingT_co", bound=floating[Any], default=floating[Any], covariant=True) +_IntegerT = TypeVar("_IntegerT", bound=integer) +_IntegerT_co = TypeVar("_IntegerT_co", bound=integer[Any], default=integer[Any], covariant=True) + +_NBit = TypeVar("_NBit", bound=NBitBase, default=Any) +_NBit1 = TypeVar("_NBit1", bound=NBitBase, default=Any) +_NBit2 = TypeVar("_NBit2", bound=NBitBase, default=_NBit1) + +_ItemT_co = TypeVar("_ItemT_co", default=Any, covariant=True) +_BoolItemT = TypeVar("_BoolItemT", bound=builtins.bool) +_BoolItemT_co = TypeVar("_BoolItemT_co", bound=builtins.bool, default=builtins.bool, covariant=True) +_NumberItemT_co = TypeVar("_NumberItemT_co", bound=int | float | complex, default=int | float | complex, covariant=True) +_InexactItemT_co = TypeVar("_InexactItemT_co", bound=float | complex, default=float | complex, covariant=True) +_FlexibleItemT_co = TypeVar( + "_FlexibleItemT_co", + bound=_CharLike_co | tuple[Any, ...], + default=_CharLike_co | tuple[Any, ...], + covariant=True, +) +_CharacterItemT_co = TypeVar("_CharacterItemT_co", bound=_CharLike_co, default=_CharLike_co, covariant=True) +_TD64ItemT_co = TypeVar("_TD64ItemT_co", bound=dt.timedelta | int | None, default=dt.timedelta | int | None, covariant=True) +_DT64ItemT_co = TypeVar("_DT64ItemT_co", bound=dt.date | int | None, default=dt.date | int | None, covariant=True) +_TD64UnitT = TypeVar("_TD64UnitT", bound=_TD64Unit, default=_TD64Unit) -_ByteOrderChar: TypeAlias = L[ - "<", # little-endian - ">", # big-endian - "=", # native order - "|", # ignore -] +### Type Aliases (for internal use only) + +_Falsy: TypeAlias = L[False, 0] | np.bool[L[False]] +_Truthy: TypeAlias = L[True, 1] | np.bool[L[True]] + +_1D: TypeAlias = tuple[int] +_2D: TypeAlias = tuple[int, int] +_2Tuple: TypeAlias = tuple[_T, _T] + +_ArrayUInt_co: TypeAlias = NDArray[unsignedinteger | np.bool] +_ArrayInt_co: TypeAlias = NDArray[integer | np.bool] +_ArrayFloat64_co: TypeAlias = NDArray[floating[_64Bit] | float32 | float16 | integer | np.bool] +_ArrayFloat_co: TypeAlias = NDArray[floating | integer | np.bool] +_ArrayComplex128_co: TypeAlias = NDArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool] +_ArrayComplex_co: TypeAlias = NDArray[inexact | integer | np.bool] +_ArrayNumber_co: TypeAlias = NDArray[number | np.bool] +_ArrayTD64_co: TypeAlias = NDArray[timedelta64 | integer | np.bool] + +_Float64_co: TypeAlias = float | floating[_64Bit] | float32 | float16 | integer | np.bool +_Complex64_co: TypeAlias = number[_32Bit] | number[_16Bit] | number[_8Bit] | builtins.bool | np.bool +_Complex128_co: TypeAlias = complex | number[_64Bit] | _Complex64_co + +_ToIndex: TypeAlias = SupportsIndex | slice | EllipsisType | _ArrayLikeInt_co | None 
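Several of the type parameters above pass `default=` to `TypeVar`, the PEP 696 feature provided by the `typing_extensions` import earlier in this stub. A small sketch of what a defaulted parameter buys (the `Box` class is hypothetical, not from NumPy):

    from typing import Generic
    from typing_extensions import TypeVar  # supports the PEP 696 `default=` argument

    ItemT = TypeVar("ItemT", default=float)

    class Box(Generic[ItemT]):
        def __init__(self, value: ItemT) -> None:
            self.value = value

    x: Box = Box(1.0)       # bare `Box` is treated as `Box[float]`
    y: Box[str] = Box("a")  # an explicit parameter overrides the default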
+_ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...]
+
+_UnsignedIntegerCType: TypeAlias = type[
+    ct.c_uint8 | ct.c_uint16 | ct.c_uint32 | ct.c_uint64
+    | ct.c_ushort | ct.c_uint | ct.c_ulong | ct.c_ulonglong
+    | ct.c_size_t | ct.c_void_p
+]  # fmt: skip
+_SignedIntegerCType: TypeAlias = type[
+    ct.c_int8 | ct.c_int16 | ct.c_int32 | ct.c_int64
+    | ct.c_short | ct.c_int | ct.c_long | ct.c_longlong
+    | ct.c_ssize_t
+]  # fmt: skip
+_FloatingCType: TypeAlias = type[ct.c_float | ct.c_double | ct.c_longdouble]
+_IntegerCType: TypeAlias = _UnsignedIntegerCType | _SignedIntegerCType
+_NumberCType: TypeAlias = _IntegerCType | _FloatingCType
+_GenericCType: TypeAlias = _NumberCType | type[ct.c_bool | ct.c_char | ct.py_object[Any]]
+
+# some commonly used builtin types that are known to result in a
+# `dtype[object_]`, when their *type* is passed to the `dtype` constructor
+# NOTE: `builtins.object` should not be included here
+_BuiltinObjectLike: TypeAlias = (
+    slice | Decimal | Fraction | UUID
+    | dt.date | dt.time | dt.timedelta | dt.tzinfo
+    | tuple[Any, ...] | list[Any] | set[Any] | frozenset[Any] | dict[Any, Any]
+)  # fmt: skip
+
+# Introduce an alias for `dtype` to avoid naming conflicts.
+_dtype: TypeAlias = dtype[_SCT]
+
+_ByteOrderChar: TypeAlias = L["<", ">", "=", "|"]
 # can be anything, is case-insensitive, and only the first character matters
 _ByteOrder: TypeAlias = L[
@@ -669,7 +901,7 @@ _ByteOrder: TypeAlias = L[
     ">", "B", "big",     # big endian
     "=", "N", "native",  # native order
     "|", "I",            # ignore
-]
+]  # fmt: skip
 _DTypeKind: TypeAlias = L[
     "b",  # boolean
     "i",  # signed integer
@@ -742,46 +974,363 @@ _DTypeNum: TypeAlias = L[
     256,   # user-defined
     2056,  # StringDType
 ]
-_DTypeBuiltinKind: TypeAlias = L[
-    0,  # structured array type, with fields
-    1,  # compiled into numpy
-    2,  # user-defined
+_DTypeBuiltinKind: TypeAlias = L[0, 1, 2]
+
+_ArrayAPIVersion: TypeAlias = L["2021.12", "2022.12", "2023.12"]
+
+_CastingKind: TypeAlias = L["no", "equiv", "safe", "same_kind", "unsafe"]
+
+_OrderKACF: TypeAlias = L[None, "K", "A", "C", "F"]
+_OrderACF: TypeAlias = L[None, "A", "C", "F"]
+_OrderCF: TypeAlias = L[None, "C", "F"]
+
+_ModeKind: TypeAlias = L["raise", "wrap", "clip"]
+_PartitionKind: TypeAlias = L["introselect"]
+# in practice, only the first case-insensitive character is considered (so e.g.
+# "QuantumSort3000" will be interpreted as quicksort).
+_SortKind: TypeAlias = L[
+    "Q", "quick", "quicksort",
+    "M", "merge", "mergesort",
+    "H", "heap", "heapsort",
+    "S", "stable", "stablesort",
+]
+_SortSide: TypeAlias = L["left", "right"]
+
+_ConvertibleToInt: TypeAlias = SupportsInt | SupportsIndex | _CharLike_co
+_ConvertibleToFloat: TypeAlias = SupportsFloat | SupportsIndex | _CharLike_co
+if sys.version_info >= (3, 11):
+    _ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co
+else:
+    _ConvertibleToComplex: TypeAlias = complex | SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co
+_ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None
+_ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None
+
+_NDIterFlagsKind: TypeAlias = L[
+    "buffered",
+    "c_index",
+    "copy_if_overlap",
+    "common_dtype",
+    "delay_bufalloc",
+    "external_loop",
+    "f_index",
+    "grow_inner", "growinner",
+    "multi_index",
+    "ranged",
+    "refs_ok",
+    "reduce_ok",
+    "zerosize_ok",
+]
+_NDIterFlagsOp: TypeAlias = L[
+    "aligned",
+    "allocate",
+    "arraymask",
+    "copy",
+    "config",
+    "nbo",
+    "no_subtype",
+    "no_broadcast",
+    "overlap_assume_elementwise",
+    "readonly",
+    "readwrite",
+    "updateifcopy",
+    "virtual",
+    "writeonly",
+    "writemasked"
+]
+
+_MemMapModeKind: TypeAlias = L[
+    "readonly", "r",
+    "copyonwrite", "c",
+    "readwrite", "r+",
+    "write", "w+",
+]
+_DT64Date: TypeAlias = _HasDateAttributes | L["TODAY", "today", b"TODAY", b"today"]
+_DT64Now: TypeAlias = L["NOW", "now", b"NOW", b"now"]
+_NaTValue: TypeAlias = L["NAT", "NaT", "nat", b"NAT", b"NaT", b"nat"]
+
+_MonthUnit: TypeAlias = L["Y", "M", b"Y", b"M"]
+_DayUnit: TypeAlias = L["W", "D", b"W", b"D"]
+_DateUnit: TypeAlias = L[_MonthUnit, _DayUnit]
+_NativeTimeUnit: TypeAlias = L["h", "m", "s", "ms", "us", "μs", b"h", b"m", b"s", b"ms", b"us"]
+_IntTimeUnit: TypeAlias = L["ns", "ps", "fs", "as", b"ns", b"ps", b"fs", b"as"]
+_TimeUnit: TypeAlias = L[_NativeTimeUnit, _IntTimeUnit]
+_NativeTD64Unit: TypeAlias = L[_DayUnit, _NativeTimeUnit]
+_IntTD64Unit: TypeAlias = L[_MonthUnit, _IntTimeUnit]
+_TD64Unit: TypeAlias = L[_DateUnit, _TimeUnit]
+_TimeUnitSpec: TypeAlias = _TD64UnitT | tuple[_TD64UnitT, SupportsIndex]
+
+### TypedDict's (for internal use only)
+
+@type_check_only
+class _FormerAttrsDict(TypedDict):
+    object: LiteralString
+    float: LiteralString
+    complex: LiteralString
+    str: LiteralString
+    int: LiteralString
+
+### Protocols (for internal use only)
+
+@type_check_only
+class _SupportsFileMethods(SupportsFlush, Protocol):
+    # Protocol for representing file-like-objects accepted by `ndarray.tofile` and `fromfile`
+    def fileno(self) -> SupportsIndex: ...
+    def tell(self) -> SupportsIndex: ...
+    def seek(self, offset: int, whence: int, /) -> object: ...
+
+@type_check_only
+class _SupportsFileMethodsRW(SupportsWrite[bytes], _SupportsFileMethods, Protocol):
+    pass
+
+@type_check_only
+class _SupportsItem(Protocol[_T_co]):
+    def item(self, /) -> _T_co: ...
+
+@type_check_only
+class _SupportsDLPack(Protocol[_T_contra]):
+    def __dlpack__(self, /, *, stream: _T_contra | None = None) -> CapsuleType: ...
+
+@type_check_only
+class _HasShape(Protocol[_ShapeT_co]):
+    @property
+    def shape(self, /) -> _ShapeT_co: ...
+
+@type_check_only
+class _HasShapeAndSupportsItem(_HasShape[_ShapeT_co], _SupportsItem[_T_co], Protocol[_ShapeT_co, _T_co]):
+    pass
+
+# matches any `x` on `x.type.item() -> _T_co`, e.g. `dtype[np.int8]` gives `_T_co: int`
+@type_check_only
+class _HasTypeWithItem(Protocol[_T_co]):
+    @property
+    def type(self, /) -> type[_SupportsItem[_T_co]]: ...
+
+# matches any `x` on `x.shape: _ShapeT_co` and `x.dtype.type.item() -> _T_co`,
+# useful for capturing the item-type (`_T_co`) of the scalar-type of an array with
+# specific shape (`_ShapeT_co`).
+@type_check_only
+class _HasShapeAndDTypeWithItem(Protocol[_ShapeT_co, _T_co]):
+    @property
+    def shape(self, /) -> _ShapeT_co: ...
+    @property
+    def dtype(self, /) -> _HasTypeWithItem[_T_co]: ...
+
+@type_check_only
+class _HasRealAndImag(Protocol[_RealT_co, _ImagT_co]):
+    @property
+    def real(self, /) -> _RealT_co: ...
+    @property
+    def imag(self, /) -> _ImagT_co: ...
+
+@type_check_only
+class _HasTypeWithRealAndImag(Protocol[_RealT_co, _ImagT_co]):
+    @property
+    def type(self, /) -> type[_HasRealAndImag[_RealT_co, _ImagT_co]]: ...
+
+@type_check_only
+class _HasDTypeWithRealAndImag(Protocol[_RealT_co, _ImagT_co]):
+    @property
+    def dtype(self, /) -> _HasTypeWithRealAndImag[_RealT_co, _ImagT_co]: ...
+
+@type_check_only
+class _HasDateAttributes(Protocol):
+    # The `datetime64` constructor requires an object with the three attributes below,
+    # and thus supports datetime duck typing
+    @property
+    def day(self) -> int: ...
+    @property
+    def month(self) -> int: ...
+    @property
+    def year(self) -> int: ...
+
+
+### Mixins (for internal use only)
+
+@type_check_only
+class _RealMixin:
+    @property
+    def real(self) -> Self: ...
+    @property
+    def imag(self) -> Self: ...
+
+@type_check_only
+class _RoundMixin:
+    @overload
+    def __round__(self, /, ndigits: None = None) -> int: ...
+    @overload
+    def __round__(self, /, ndigits: SupportsIndex) -> Self: ...
+
+@type_check_only
+class _IntegralMixin(_RealMixin):
+    @property
+    def numerator(self) -> Self: ...
+    @property
+    def denominator(self) -> L[1]: ...
+
+    def is_integer(self, /) -> L[True]: ...
+
+### Public API
+
+__version__: Final[LiteralString] = ...
+
+e: Final[float] = ...
+euler_gamma: Final[float] = ...
+pi: Final[float] = ...
+inf: Final[float] = ...
+nan: Final[float] = ...
+little_endian: Final[builtins.bool] = ...
+False_: Final[np.bool[L[False]]] = ...
+True_: Final[np.bool[L[True]]] = ...
+newaxis: Final[None] = None
+
+# not in __all__
+__NUMPY_SETUP__: Final[L[False]] = False
+__numpy_submodules__: Final[set[LiteralString]] = ...
+__former_attrs__: Final[_FormerAttrsDict] = ...
+__future_scalars__: Final[set[L["bytes", "str", "object"]]] = ...
+__array_api_version__: Final[L["2023.12"]] = "2023.12"
+test: Final[PytestTester] = ...
+
 @final
-class dtype(Generic[_DTypeScalar_co]):
+class dtype(Generic[_SCT_co]):
     names: None | tuple[builtins.str, ...]
     def __hash__(self) -> int: ...
-    # Overload for subclass of generic
+
+    # `None` results in the default dtype
+    @overload
+    def __new__(
+        cls,
+        dtype: None | type[float64],
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[builtins.str, Any] = ...
+    ) -> dtype[float64]: ...
+
+    # Overload for `dtype` instances, scalar types, and instances that have a
+    # `dtype: dtype[_SCT]` attribute
     @overload
     def __new__(
         cls,
-        dtype: type[_DTypeScalar_co],
+        dtype: _DTypeLike[_SCT],
         align: builtins.bool = ...,
         copy: builtins.bool = ...,
         metadata: dict[builtins.str, Any] = ...,
-    ) -> dtype[_DTypeScalar_co]: ...
+    ) -> dtype[_SCT]: ...
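As the `_HasDateAttributes` comment above notes, the `datetime64` constructor duck-types on the `day`/`month`/`year` attributes, so any date-like object is accepted; the common runtime case is `datetime.date`:

    import datetime as dt
    import numpy as np

    d = np.datetime64(dt.date(2024, 1, 15))  # matched via its day/month/year attributes
    print(d)        # 2024-01-15
    print(d.dtype)  # datetime64[D]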
+
+    # Builtin types
     #
-    # builtins.bool < int < float < complex < object
+    # NOTE: Typecheckers act as if `bool <: int <: float <: complex <: object`,
+    # even though at runtime `int`, `float`, and `complex` aren't subtypes.
+    # This makes it impossible to express e.g. "a float that isn't an int",
+    # since type checkers treat `_: float` like `_: float | int`.
     #
-    # so we have to make sure the overloads for the narrowest type is
-    # first.
-    # Builtin types
+    # For more details, see:
+    # - https://github.com/numpy/numpy/issues/27032#issuecomment-2278958251
+    # - https://typing.readthedocs.io/en/latest/spec/special-types.html#special-cases-for-float-and-complex
+    @overload
+    def __new__(
+        cls,
+        dtype: type[builtins.bool | np.bool],
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[np.bool]: ...
+    # NOTE: `_: type[int]` also accepts `type[int | bool]`
+    @overload
+    def __new__(
+        cls,
+        dtype: type[int | int_ | np.bool],
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[int_ | np.bool]: ...
+    # NOTE: `_: type[float]` also accepts `type[float | int | bool]`
+    # NOTE: `float64` inherits from `float` at runtime; but this isn't
+    # reflected in these stubs. So an explicit `float64` is required here.
+    @overload
+    def __new__(
+        cls,
+        dtype: None | type[float | float64 | int_ | np.bool],
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[float64 | int_ | np.bool]: ...
+    # NOTE: `_: type[complex]` also accepts `type[complex | float | int | bool]`
+    @overload
+    def __new__(
+        cls,
+        dtype: type[complex | complex128 | float64 | int_ | np.bool],
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[complex128 | float64 | int_ | np.bool]: ...
+    @overload
+    def __new__(
+        cls,
+        dtype: type[bytes],  # also includes `type[bytes_]`
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[bytes_]: ...
     @overload
-    def __new__(cls, dtype: type[builtins.bool], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[np.bool]: ...
+    def __new__(
+        cls,
+        dtype: type[str],  # also includes `type[str_]`
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[str_]: ...
+    # NOTE: These `memoryview` overloads assume PEP 688, which requires mypy to
+    # be run with the (undocumented) `--disable-memoryview-promotion` flag.
+    # This will be the default in a future mypy release, see:
+    # https://github.com/python/mypy/issues/15313
+    # Pyright / Pylance requires setting `disableBytesTypePromotions=true`,
+    # which is the default in strict mode
     @overload
-    def __new__(cls, dtype: type[int], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int_]: ...
+    def __new__(
+        cls,
+        dtype: type[memoryview | void],
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[void]: ...
+    # NOTE: `_: type[object]` would also accept e.g. `type[object | complex]`,
+    # and is therefore not included here
     @overload
-    def __new__(cls, dtype: None | type[float], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float64]: ...
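The builtin-type overloads above mirror how the `dtype` constructor maps Python scalar types at runtime; the mapping is easy to verify (the `int` case is platform dependent, which is why the stubs return `int_` rather than a fixed-width type):

    import numpy as np

    assert np.dtype(bool) == np.dtype(np.bool_)
    assert np.dtype(float) == np.dtype(np.float64)
    assert np.dtype(complex) == np.dtype(np.complex128)
    print(np.dtype(int))  # int64 on most 64-bit Unix platforms, int32 on Windows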
+ def __new__( + cls, + dtype: type[_BuiltinObjectLike | object_], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[str, Any] = ..., + ) -> dtype[object_]: ... + + # Unions of builtins. @overload - def __new__(cls, dtype: type[complex], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex128]: ... + def __new__( + cls, + dtype: type[bytes | str], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[str, Any] = ..., + ) -> dtype[character]: ... @overload - def __new__(cls, dtype: type[builtins.str], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[str_]: ... + def __new__( + cls, + dtype: type[bytes | str | memoryview], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[str, Any] = ..., + ) -> dtype[flexible]: ... @overload - def __new__(cls, dtype: type[bytes], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[bytes_]: ... + def __new__( + cls, + dtype: type[complex | bytes | str | memoryview | _BuiltinObjectLike], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[str, Any] = ..., + ) -> dtype[np.bool | int_ | float64 | complex128 | flexible | object_]: ... # `unsignedinteger` string-based representations and ctypes @overload @@ -798,7 +1347,6 @@ class dtype(Generic[_DTypeScalar_co]): def __new__(cls, dtype: _UShortCodes | type[ct.c_ushort], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ushort]: ... @overload def __new__(cls, dtype: _UIntCCodes | type[ct.c_uint], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uintc]: ... - # NOTE: We're assuming here that `uint_ptr_t == size_t`, # an assumption that does not hold in rare cases (same for `ssize_t`) @overload @@ -870,63 +1418,134 @@ class dtype(Generic[_DTypeScalar_co]): @overload def __new__(cls, dtype: _BytesCodes | type[ct.c_char], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[bytes_]: ... @overload - def __new__(cls, dtype: _VoidCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[void]: ... + def __new__(cls, dtype: _VoidCodes | _VoidDTypeLike, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[void]: ... @overload def __new__(cls, dtype: _ObjectCodes | type[ct.py_object[Any]], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[object_]: ... - # dtype of a dtype is the same dtype + # `StringDType` requires special treatment because it has no scalar type + @overload + def __new__( + cls, + dtype: dtypes.StringDType | _StringCodes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ... + ) -> dtypes.StringDType: ... + + # Combined char-codes and ctypes, analogous to the scalar-type hierarchy @overload def __new__( cls, - dtype: dtype[_DTypeScalar_co], + dtype: _UnsignedIntegerCodes | _UnsignedIntegerCType, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ..., - ) -> dtype[_DTypeScalar_co]: ... + ) -> dtype[unsignedinteger[Any]]: ... 
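`StringDType` is special-cased above because, unlike every other dtype, it has no associated scalar type; its elements round-trip as plain Python `str`. A brief usage sketch (NumPy >= 2.0):

    import numpy as np

    sdt = np.dtypes.StringDType()  # variable-width UTF-8 strings
    arr = np.array(["numpy", "typing"], dtype=sdt)
    print(arr.dtype)       # StringDType()
    print(arr[0].upper())  # NUMPY -- elements come back as Python str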
@overload def __new__( cls, - dtype: _SupportsDType[dtype[_DTypeScalar_co]], + dtype: _SignedIntegerCodes | _SignedIntegerCType, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ..., - ) -> dtype[_DTypeScalar_co]: ... - # Handle strings that can't be expressed as literals; i.e. s1, s2, ... + ) -> dtype[signedinteger[Any]]: ... @overload def __new__( cls, - dtype: builtins.str, + dtype: _IntegerCodes | _IntegerCType, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ..., - ) -> dtype[Any]: ... - # Catchall overload for void-likes + ) -> dtype[integer[Any]]: ... @overload def __new__( cls, - dtype: _VoidDTypeLike, + dtype: _FloatingCodes | _FloatingCType, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ..., - ) -> dtype[void]: ... - # Catchall overload for object-likes + ) -> dtype[floating[Any]]: ... @overload def __new__( cls, - dtype: type[object], + dtype: _ComplexFloatingCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ..., - ) -> dtype[object_]: ... - - def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... - + ) -> dtype[complexfloating[Any, Any]]: ... @overload - def __getitem__(self: dtype[void], key: list[builtins.str], /) -> dtype[void]: ... + def __new__( + cls, + dtype: _InexactCodes | _FloatingCType, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[inexact[Any]]: ... @overload - def __getitem__(self: dtype[void], key: builtins.str | SupportsIndex, /) -> dtype[Any]: ... - - # NOTE: In the future 1-based multiplications will also yield `flexible` dtypes + def __new__( + cls, + dtype: _NumberCodes | _NumberCType, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[number[Any]]: ... + @overload + def __new__( + cls, + dtype: _CharacterCodes | type[ct.c_char], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[character]: ... + @overload + def __new__( + cls, + dtype: _FlexibleCodes | type[ct.c_char], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[flexible]: ... + @overload + def __new__( + cls, + dtype: _GenericCodes | _GenericCType, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[generic]: ... + + # Handle strings that can't be expressed as literals; i.e. "S1", "S2", ... + @overload + def __new__( + cls, + dtype: builtins.str, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[Any]: ... + + # Catch-all overload for object-likes + # NOTE: `object_ | Any` is *not* equivalent to `Any` -- it describes some + # (static) type `T` s.t. `object_ <: T <: builtins.object` (`<:` denotes + # the subtyping relation, the (gradual) typing analogue of `issubclass()`). + # https://typing.readthedocs.io/en/latest/spec/concepts.html#union-types + @overload + def __new__( + cls, + dtype: type[object], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[object_ | Any]: ... + + def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... + + @overload + def __getitem__(self: dtype[void], key: list[builtins.str], /) -> dtype[void]: ... 
+ @overload + def __getitem__(self: dtype[void], key: builtins.str | SupportsIndex, /) -> dtype[Any]: ... + + # NOTE: In the future 1-based multiplications will also yield `flexible` dtypes @overload def __mul__(self: _DType, value: L[1], /) -> _DType: ... @overload @@ -991,87 +1610,64 @@ class dtype(Generic[_DTypeScalar_co]): def ndim(self) -> int: ... @property def subdtype(self) -> None | tuple[dtype[Any], _Shape]: ... - def newbyteorder(self: _DType, new_order: _ByteOrder = ..., /) -> _DType: ... + def newbyteorder(self, new_order: _ByteOrder = ..., /) -> Self: ... @property def str(self) -> LiteralString: ... @property - def type(self) -> type[_DTypeScalar_co]: ... + def type(self) -> type[_SCT_co]: ... -_ArrayLikeInt: TypeAlias = ( - int - | integer[Any] - | Sequence[int | integer[Any]] - | Sequence[Sequence[Any]] # TODO: wait for support for recursive types - | NDArray[Any] -) - -_FlatIterSelf = TypeVar("_FlatIterSelf", bound=flatiter[Any]) -_FlatShapeType = TypeVar("_FlatShapeType", bound=tuple[int]) @final -class flatiter(Generic[_NdArraySubClass_co]): +class flatiter(Generic[_ArrayT_co]): __hash__: ClassVar[None] @property - def base(self) -> _NdArraySubClass_co: ... + def base(self) -> _ArrayT_co: ... @property def coords(self) -> _Shape: ... @property def index(self) -> int: ... - def copy(self) -> _NdArraySubClass_co: ... - def __iter__(self: _FlatIterSelf) -> _FlatIterSelf: ... - def __next__(self: flatiter[NDArray[_ScalarType]]) -> _ScalarType: ... + def copy(self) -> _ArrayT_co: ... + def __iter__(self) -> Self: ... + def __next__(self: flatiter[NDArray[_SCT]]) -> _SCT: ... def __len__(self) -> int: ... @overload def __getitem__( - self: flatiter[NDArray[_ScalarType]], + self: flatiter[NDArray[_SCT]], key: int | integer[Any] | tuple[int | integer[Any]], - ) -> _ScalarType: ... + ) -> _SCT: ... @overload def __getitem__( self, - key: _ArrayLikeInt | slice | ellipsis | tuple[_ArrayLikeInt | slice | ellipsis], - ) -> _NdArraySubClass_co: ... + key: _ArrayLikeInt | slice | EllipsisType | tuple[_ArrayLikeInt | slice | EllipsisType], + ) -> _ArrayT_co: ... # TODO: `__setitem__` operates via `unsafe` casting rules, and can # thus accept any type accepted by the relevant underlying `np.generic` # constructor. # This means that `value` must in reality be a supertype of `npt.ArrayLike`. def __setitem__( self, - key: _ArrayLikeInt | slice | ellipsis | tuple[_ArrayLikeInt | slice | ellipsis], + key: _ArrayLikeInt | slice | EllipsisType | tuple[_ArrayLikeInt | slice | EllipsisType], value: Any, ) -> None: ... @overload - def __array__(self: flatiter[ndarray[_FlatShapeType, _DType]], dtype: None = ..., /) -> ndarray[_FlatShapeType, _DType]: ... + def __array__(self: flatiter[ndarray[_1DShapeT, _DType]], dtype: None = ..., /) -> ndarray[_1DShapeT, _DType]: ... @overload - def __array__(self: flatiter[ndarray[_FlatShapeType, Any]], dtype: _DType, /) -> ndarray[_FlatShapeType, _DType]: ... + def __array__(self: flatiter[ndarray[_1DShapeT, Any]], dtype: _DType, /) -> ndarray[_1DShapeT, _DType]: ... @overload - def __array__(self: flatiter[ndarray[Any, _DType]], dtype: None = ..., /) -> ndarray[Any, _DType]: ... + def __array__(self: flatiter[ndarray[_Shape, _DType]], dtype: None = ..., /) -> ndarray[_Shape, _DType]: ... @overload - def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ... 
     @overload
-    def __array__(self: flatiter[ndarray[_FlatShapeType, _DType]], dtype: None = ..., /) -> ndarray[_FlatShapeType, _DType]: ...
+    def __array__(self: flatiter[ndarray[_1DShapeT, _DType]], dtype: None = ..., /) -> ndarray[_1DShapeT, _DType]: ...
     @overload
-    def __array__(self: flatiter[ndarray[_FlatShapeType, Any]], dtype: _DType, /) -> ndarray[_FlatShapeType, _DType]: ...
+    def __array__(self: flatiter[ndarray[_1DShapeT, Any]], dtype: _DType, /) -> ndarray[_1DShapeT, _DType]: ...
     @overload
-    def __array__(self: flatiter[ndarray[Any, _DType]], dtype: None = ..., /) -> ndarray[Any, _DType]: ...
+    def __array__(self: flatiter[ndarray[_Shape, _DType]], dtype: None = ..., /) -> ndarray[_Shape, _DType]: ...
     @overload
-    def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ...
-
-_OrderKACF: TypeAlias = L[None, "K", "A", "C", "F"]
-_OrderACF: TypeAlias = L[None, "A", "C", "F"]
-_OrderCF: TypeAlias = L[None, "C", "F"]
-
-_ModeKind: TypeAlias = L["raise", "wrap", "clip"]
-_PartitionKind: TypeAlias = L["introselect"]
-# in practice, only the first case-insensitive character is considered (so e.g.
-# "QuantumSort3000" will be interpreted as quicksort).
-_SortKind: TypeAlias = L[
-    "Q", "quick", "quicksort",
-    "M", "merge", "mergesort",
-    "H", "heap", "heapsort",
-    "S", "stable", "stablesort",
-]
-_SortSide: TypeAlias = L["left", "right"]
-
-_ArraySelf = TypeVar("_ArraySelf", bound=_ArrayOrScalarCommon)
+    def __array__(self, dtype: _DType, /) -> ndarray[_Shape, _DType]: ...
 
+@type_check_only
 class _ArrayOrScalarCommon:
     @property
-    def T(self: _ArraySelf) -> _ArraySelf: ...
+    def real(self, /) -> Any: ...
+    @property
+    def imag(self, /) -> Any: ...
+    @property
+    def T(self) -> Self: ...
     @property
-    def mT(self: _ArraySelf) -> _ArraySelf: ...
+    def mT(self) -> Self: ...
     @property
     def data(self) -> memoryview: ...
     @property
@@ -1082,39 +1678,36 @@ class _ArrayOrScalarCommon:
     def nbytes(self) -> int: ...
     @property
     def device(self) -> L["cpu"]: ...
-    def __bool__(self) -> builtins.bool: ...
-    def __bytes__(self) -> bytes: ...
-    def __str__(self) -> str: ...
-    def __repr__(self) -> str: ...
-    def __copy__(self: _ArraySelf) -> _ArraySelf: ...
-    def __deepcopy__(self: _ArraySelf, memo: None | dict[int, Any], /) -> _ArraySelf: ...
+
+    def __bool__(self, /) -> builtins.bool: ...
+    def __int__(self, /) -> int: ...
+    def __float__(self, /) -> float: ...
+    def __copy__(self) -> Self: ...
+    def __deepcopy__(self, memo: None | dict[int, Any], /) -> Self: ...
     # TODO: How to deal with the non-commutative nature of `==` and `!=`?
     # xref numpy/numpy#17368
     def __eq__(self, other: Any, /) -> Any: ...
     def __ne__(self, other: Any, /) -> Any: ...
-    def copy(self: _ArraySelf, order: _OrderKACF = ...) -> _ArraySelf: ...
-    def dump(self, file: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _SupportsWrite[bytes]) -> None: ...
+
+    def copy(self, order: _OrderKACF = ...) -> Self: ...
+    def dump(self, file: StrOrBytesPath | SupportsWrite[bytes]) -> None: ...
     def dumps(self) -> bytes: ...
     def tobytes(self, order: _OrderKACF = ...) -> bytes: ...
     # NOTE: `tostring()` is deprecated and therefore excluded
     # def tostring(self, order=...): ...
-    def tofile(
-        self,
-        fid: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _IOProtocol,
-        sep: str = ...,
-        format: str = ...,
-    ) -> None: ...
+    def tofile(self, fid: StrOrBytesPath | _SupportsFileMethods, sep: str = ..., format: str = ...) -> None: ...
     # generics and 0d arrays return builtin scalars
     def tolist(self) -> Any: ...
+    def to_device(self, device: L["cpu"], /, *, stream: None | int | Any = ...) -> Self: ...
     @property
     def __array_interface__(self) -> dict[str, Any]: ...
     @property
     def __array_priority__(self) -> float: ...
     @property
-    def __array_struct__(self) -> Any: ...  # builtins.PyCapsule
-    def __array_namespace__(self, *, api_version: None | _ArrayAPIVersion = ...) -> Any: ...
+    def __array_struct__(self) -> CapsuleType: ...  # builtins.PyCapsule
+    def __array_namespace__(self, /, *, api_version: _ArrayAPIVersion | None = None) -> ModuleType: ...
     def __setstate__(self, state: tuple[
         SupportsIndex,  # version
         _ShapeLike,  # Shape
@@ -1122,113 +1715,9 @@ class _ArrayOrScalarCommon:
         np.bool,  # F-contiguous
         bytes | list[Any],  # Data
     ], /) -> None: ...
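A sketch of the array-API surface this hunk tightens: `__array_namespace__` now returns `ModuleType` instead of `Any`, and `to_device` only accepts the single device NumPy actually has (illustrative only):

    import numpy as np

    x = np.arange(3)
    xp = x.__array_namespace__()   # typed as ModuleType
    y = x.to_device("cpu")         # typed as Self; "cpu" is the sole device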
-    # an `np.bool` is returned when `keepdims=True` and `self` is a 0d array
-
-    @overload
-    def all(
-        self,
-        axis: None = ...,
-        out: None = ...,
-        keepdims: L[False] = ...,
-        *,
-        where: _ArrayLikeBool_co = ...,
-    ) -> np.bool: ...
-    @overload
-    def all(
-        self,
-        axis: None | _ShapeLike = ...,
-        out: None = ...,
-        keepdims: builtins.bool = ...,
-        *,
-        where: _ArrayLikeBool_co = ...,
-    ) -> Any: ...
-    @overload
-    def all(
-        self,
-        axis: None | _ShapeLike = ...,
-        out: _NdArraySubClass = ...,
-        keepdims: builtins.bool = ...,
-        *,
-        where: _ArrayLikeBool_co = ...,
-    ) -> _NdArraySubClass: ...
-
-    @overload
-    def any(
-        self,
-        axis: None = ...,
-        out: None = ...,
-        keepdims: L[False] = ...,
-        *,
-        where: _ArrayLikeBool_co = ...,
-    ) -> np.bool: ...
-    @overload
-    def any(
-        self,
-        axis: None | _ShapeLike = ...,
-        out: None = ...,
-        keepdims: builtins.bool = ...,
-        *,
-        where: _ArrayLikeBool_co = ...,
-    ) -> Any: ...
-    @overload
-    def any(
-        self,
-        axis: None | _ShapeLike = ...,
-        out: _NdArraySubClass = ...,
-        keepdims: builtins.bool = ...,
-        *,
-        where: _ArrayLikeBool_co = ...,
-    ) -> _NdArraySubClass: ...
-
-    @overload
-    def argmax(
-        self,
-        axis: None = ...,
-        out: None = ...,
-        *,
-        keepdims: L[False] = ...,
-    ) -> intp: ...
-    @overload
-    def argmax(
-        self,
-        axis: SupportsIndex = ...,
-        out: None = ...,
-        *,
-        keepdims: builtins.bool = ...,
-    ) -> Any: ...
-    @overload
-    def argmax(
-        self,
-        axis: None | SupportsIndex = ...,
-        out: _NdArraySubClass = ...,
-        *,
-        keepdims: builtins.bool = ...,
-    ) -> _NdArraySubClass: ...
-
-    @overload
-    def argmin(
-        self,
-        axis: None = ...,
-        out: None = ...,
-        *,
-        keepdims: L[False] = ...,
-    ) -> intp: ...
-    @overload
-    def argmin(
-        self,
-        axis: SupportsIndex = ...,
-        out: None = ...,
-        *,
-        keepdims: builtins.bool = ...,
-    ) -> Any: ...
-    @overload
-    def argmin(
-        self,
-        axis: None | SupportsIndex = ...,
-        out: _NdArraySubClass = ...,
-        *,
-        keepdims: builtins.bool = ...,
-    ) -> _NdArraySubClass: ...
+    def conj(self) -> Self: ...
+    def conjugate(self) -> Self: ...
 
     def argsort(
         self,
@@ -1239,321 +1728,322 @@ class _ArrayOrScalarCommon:
         stable: None | bool = ...,
     ) -> NDArray[Any]: ...
 
+    @overload  # axis=None (default), out=None (default), keepdims=False (default)
+    def argmax(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ...
+    @overload  # axis=index, out=None (default)
+    def argmax(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ...
+    @overload  # axis=index, out=ndarray
+    def argmax(self, /, axis: SupportsIndex | None, out: _ArrayT, *, keepdims: builtins.bool = False) -> _ArrayT: ...
     @overload
-    def choose(
-        self,
-        choices: ArrayLike,
-        out: None = ...,
-        mode: _ModeKind = ...,
-    ) -> NDArray[Any]: ...
+    def argmax(self, /, axis: SupportsIndex | None = None, *, out: _ArrayT, keepdims: builtins.bool = False) -> _ArrayT: ...
+
+    @overload  # axis=None (default), out=None (default), keepdims=False (default)
+    def argmin(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ...
+    @overload  # axis=index, out=None (default)
+    def argmin(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ...
+    @overload  # axis=index, out=ndarray
+    def argmin(self, /, axis: SupportsIndex | None, out: _ArrayT, *, keepdims: builtins.bool = False) -> _ArrayT: ...
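How the rewritten `argmax`/`argmin` overloads resolve, as a sketch (mypy-style reveals assumed):

    import numpy as np

    a = np.arange(6).reshape(2, 3)
    reveal_type(a.argmax())                 # np.intp   (no axis, no out)
    reveal_type(a.argmax(axis=0))           # Any       (shape depends on axis)
    out = np.empty(3, dtype=np.intp)
    reveal_type(a.argmax(axis=0, out=out))  # exactly the array passed as `out`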
     @overload
-    def choose(
-        self,
-        choices: ArrayLike,
-        out: _NdArraySubClass = ...,
-        mode: _ModeKind = ...,
-    ) -> _NdArraySubClass: ...
+    def argmin(self, /, axis: SupportsIndex | None = None, *, out: _ArrayT, keepdims: builtins.bool = False) -> _ArrayT: ...
+
+    @overload  # out=None (default)
+    def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ...
+    @overload  # out=ndarray
+    def round(self, /, decimals: SupportsIndex, out: _ArrayT) -> _ArrayT: ...
     @overload
-    def clip(
-        self,
-        min: ArrayLike = ...,
-        max: None | ArrayLike = ...,
-        out: None = ...,
-        **kwargs: Any,
-    ) -> NDArray[Any]: ...
+    def round(self, /, decimals: SupportsIndex = 0, *, out: _ArrayT) -> _ArrayT: ...
+
+    @overload  # out=None (default)
+    def choose(self, /, choices: ArrayLike, out: None = None, mode: _ModeKind = "raise") -> NDArray[Any]: ...
+    @overload  # out=ndarray
+    def choose(self, /, choices: ArrayLike, out: _ArrayT, mode: _ModeKind = "raise") -> _ArrayT: ...
+
+    # TODO: Annotate kwargs with an unpacked `TypedDict`
+    @overload  # out: None (default)
+    def clip(self, /, min: ArrayLike, max: ArrayLike | None = None, out: None = None, **kwargs: Any) -> NDArray[Any]: ...
     @overload
-    def clip(
-        self,
-        min: None = ...,
-        max: ArrayLike = ...,
-        out: None = ...,
-        **kwargs: Any,
-    ) -> NDArray[Any]: ...
+    def clip(self, /, min: None, max: ArrayLike, out: None = None, **kwargs: Any) -> NDArray[Any]: ...
     @overload
-    def clip(
-        self,
-        min: ArrayLike = ...,
-        max: None | ArrayLike = ...,
-        out: _NdArraySubClass = ...,
-        **kwargs: Any,
-    ) -> _NdArraySubClass: ...
+    def clip(self, /, min: None = None, *, max: ArrayLike, out: None = None, **kwargs: Any) -> NDArray[Any]: ...
+    @overload  # out: ndarray
+    def clip(self, /, min: ArrayLike, max: ArrayLike | None, out: _ArrayT, **kwargs: Any) -> _ArrayT: ...
     @overload
-    def clip(
-        self,
-        min: None = ...,
-        max: ArrayLike = ...,
-        out: _NdArraySubClass = ...,
-        **kwargs: Any,
-    ) -> _NdArraySubClass: ...
-
+    def clip(self, /, min: ArrayLike, max: ArrayLike | None = None, *, out: _ArrayT, **kwargs: Any) -> _ArrayT: ...
     @overload
-    def compress(
-        self,
-        a: ArrayLike,
-        axis: None | SupportsIndex = ...,
-        out: None = ...,
-    ) -> NDArray[Any]: ...
+    def clip(self, /, min: None, max: ArrayLike, out: _ArrayT, **kwargs: Any) -> _ArrayT: ...
     @overload
-    def compress(
-        self,
-        a: ArrayLike,
-        axis: None | SupportsIndex = ...,
-        out: _NdArraySubClass = ...,
-    ) -> _NdArraySubClass: ...
-
-    def conj(self: _ArraySelf) -> _ArraySelf: ...
-
-    def conjugate(self: _ArraySelf) -> _ArraySelf: ...
+    def clip(self, /, min: None = None, *, max: ArrayLike, out: _ArrayT, **kwargs: Any) -> _ArrayT: ...
 
     @overload
-    def cumprod(
-        self,
-        axis: None | SupportsIndex = ...,
-        dtype: DTypeLike = ...,
-        out: None = ...,
-    ) -> NDArray[Any]: ...
+    def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None) -> NDArray[Any]: ...
     @overload
-    def cumprod(
-        self,
-        axis: None | SupportsIndex = ...,
-        dtype: DTypeLike = ...,
-        out: _NdArraySubClass = ...,
-    ) -> _NdArraySubClass: ...
+    def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None, out: _ArrayT) -> _ArrayT: ...
+    @overload
+    def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, out: _ArrayT) -> _ArrayT: ...
+
+    @overload  # out: None (default)
+    def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ...
+    @overload  # out: ndarray
+    def cumprod(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ...
     @overload
-    def cumsum(
-        self,
-        axis: None | SupportsIndex = ...,
-        dtype: DTypeLike = ...,
-        out: None = ...,
-    ) -> NDArray[Any]: ...
+    def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ...
+
+    @overload  # out: None (default)
+    def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ...
+    @overload  # out: ndarray
+    def cumsum(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ...
     @overload
-    def cumsum(
-        self,
-        axis: None | SupportsIndex = ...,
-        dtype: DTypeLike = ...,
-        out: _NdArraySubClass = ...,
-    ) -> _NdArraySubClass: ...
+    def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ...
 
     @overload
     def max(
         self,
-        axis: None | _ShapeLike = ...,
-        out: None = ...,
-        keepdims: builtins.bool = ...,
+        /,
+        axis: _ShapeLike | None = None,
+        out: None = None,
+        keepdims: builtins.bool = False,
         initial: _NumberLike_co = ...,
-        where: _ArrayLikeBool_co = ...,
+        where: _ArrayLikeBool_co = True,
     ) -> Any: ...
     @overload
     def max(
         self,
-        axis: None | _ShapeLike = ...,
-        out: _NdArraySubClass = ...,
-        keepdims: builtins.bool = ...,
+        /,
+        axis: _ShapeLike | None,
+        out: _ArrayT,
+        keepdims: builtins.bool = False,
         initial: _NumberLike_co = ...,
-        where: _ArrayLikeBool_co = ...,
-    ) -> _NdArraySubClass: ...
-
-    @overload
-    def mean(
-        self,
-        axis: None | _ShapeLike = ...,
-        dtype: DTypeLike = ...,
-        out: None = ...,
-        keepdims: builtins.bool = ...,
-        *,
-        where: _ArrayLikeBool_co = ...,
-    ) -> Any: ...
+        where: _ArrayLikeBool_co = True,
+    ) -> _ArrayT: ...
     @overload
-    def mean(
+    def max(
         self,
-        axis: None | _ShapeLike = ...,
-        dtype: DTypeLike = ...,
-        out: _NdArraySubClass = ...,
-        keepdims: builtins.bool = ...,
+        /,
+        axis: _ShapeLike | None = None,
         *,
-        where: _ArrayLikeBool_co = ...,
-    ) -> _NdArraySubClass: ...
+        out: _ArrayT,
+        keepdims: builtins.bool = False,
+        initial: _NumberLike_co = ...,
+        where: _ArrayLikeBool_co = True,
+    ) -> _ArrayT: ...
 
     @overload
     def min(
         self,
-        axis: None | _ShapeLike = ...,
-        out: None = ...,
-        keepdims: builtins.bool = ...,
+        /,
+        axis: _ShapeLike | None = None,
+        out: None = None,
+        keepdims: builtins.bool = False,
         initial: _NumberLike_co = ...,
-        where: _ArrayLikeBool_co = ...,
+        where: _ArrayLikeBool_co = True,
     ) -> Any: ...
     @overload
     def min(
         self,
-        axis: None | _ShapeLike = ...,
-        out: _NdArraySubClass = ...,
-        keepdims: builtins.bool = ...,
-        initial: _NumberLike_co = ...,
-        where: _ArrayLikeBool_co = ...,
-    ) -> _NdArraySubClass: ...
-
-    @overload
-    def prod(
-        self,
-        axis: None | _ShapeLike = ...,
-        dtype: DTypeLike = ...,
-        out: None = ...,
-        keepdims: builtins.bool = ...,
+        /,
+        axis: _ShapeLike | None,
+        out: _ArrayT,
+        keepdims: builtins.bool = False,
         initial: _NumberLike_co = ...,
-        where: _ArrayLikeBool_co = ...,
-    ) -> Any: ...
+        where: _ArrayLikeBool_co = True,
+    ) -> _ArrayT: ...
     @overload
-    def prod(
+    def min(
         self,
-        axis: None | _ShapeLike = ...,
-        dtype: DTypeLike = ...,
-        out: _NdArraySubClass = ...,
-        keepdims: builtins.bool = ...,
+        /,
+        axis: _ShapeLike | None = None,
+        *,
+        out: _ArrayT,
+        keepdims: builtins.bool = False,
         initial: _NumberLike_co = ...,
-        where: _ArrayLikeBool_co = ...,
-    ) -> _NdArraySubClass: ...
+        where: _ArrayLikeBool_co = True,
+    ) -> _ArrayT: ...
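The same `out=`-driven dispatch as above applies to the reductions; note that the stub defaults now spell out the runtime defaults (`keepdims=False`, `where=True`) instead of `...`. A sketch:

    import numpy as np

    a = np.arange(6.0)
    reveal_type(a.max())         # Any
    out = np.empty((), dtype=np.float64)
    reveal_type(a.max(out=out))  # the `out` array's own type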
     @overload
-    def round(
-        self: _ArraySelf,
-        decimals: SupportsIndex = ...,
-        out: None = ...,
-    ) -> _ArraySelf: ...
-    @overload
-    def round(
+    def sum(
         self,
-        decimals: SupportsIndex = ...,
-        out: _NdArraySubClass = ...,
-    ) -> _NdArraySubClass: ...
-
+        /,
+        axis: _ShapeLike | None = None,
+        dtype: DTypeLike | None = None,
+        out: None = None,
+        keepdims: builtins.bool = False,
+        initial: _NumberLike_co = 0,
+        where: _ArrayLikeBool_co = True,
+    ) -> Any: ...
     @overload
-    def std(
+    def sum(
         self,
-        axis: None | _ShapeLike = ...,
-        dtype: DTypeLike = ...,
-        out: None = ...,
-        ddof: float = ...,
-        keepdims: builtins.bool = ...,
-        *,
-        where: _ArrayLikeBool_co = ...,
-    ) -> Any: ...
+        /,
+        axis: _ShapeLike | None,
+        dtype: DTypeLike | None,
+        out: _ArrayT,
+        keepdims: builtins.bool = False,
+        initial: _NumberLike_co = 0,
+        where: _ArrayLikeBool_co = True,
+    ) -> _ArrayT: ...
     @overload
-    def std(
+    def sum(
         self,
-        axis: None | _ShapeLike = ...,
-        dtype: DTypeLike = ...,
-        out: _NdArraySubClass = ...,
-        ddof: float = ...,
-        keepdims: builtins.bool = ...,
+        /,
+        axis: _ShapeLike | None = None,
+        dtype: DTypeLike | None = None,
         *,
-        where: _ArrayLikeBool_co = ...,
-    ) -> _NdArraySubClass: ...
+        out: _ArrayT,
+        keepdims: builtins.bool = False,
+        initial: _NumberLike_co = 0,
+        where: _ArrayLikeBool_co = True,
+    ) -> _ArrayT: ...
 
     @overload
-    def sum(
+    def prod(
         self,
-        axis: None | _ShapeLike = ...,
-        dtype: DTypeLike = ...,
-        out: None = ...,
-        keepdims: builtins.bool = ...,
-        initial: _NumberLike_co = ...,
-        where: _ArrayLikeBool_co = ...,
+        /,
+        axis: _ShapeLike | None = None,
+        dtype: DTypeLike | None = None,
+        out: None = None,
+        keepdims: builtins.bool = False,
+        initial: _NumberLike_co = 1,
+        where: _ArrayLikeBool_co = True,
     ) -> Any: ...
     @overload
-    def sum(
+    def prod(
         self,
-        axis: None | _ShapeLike = ...,
-        dtype: DTypeLike = ...,
-        out: _NdArraySubClass = ...,
-        keepdims: builtins.bool = ...,
-        initial: _NumberLike_co = ...,
-        where: _ArrayLikeBool_co = ...,
-    ) -> _NdArraySubClass: ...
+        /,
+        axis: _ShapeLike | None,
+        dtype: DTypeLike | None,
+        out: _ArrayT,
+        keepdims: builtins.bool = False,
+        initial: _NumberLike_co = 1,
+        where: _ArrayLikeBool_co = True,
+    ) -> _ArrayT: ...
+    @overload
+    def prod(
+        self,
+        /,
+        axis: _ShapeLike | None = None,
+        dtype: DTypeLike | None = None,
+        *,
+        out: _ArrayT,
+        keepdims: builtins.bool = False,
+        initial: _NumberLike_co = 1,
+        where: _ArrayLikeBool_co = True,
+    ) -> _ArrayT: ...
 
     @overload
-    def var(
+    def mean(
         self,
-        axis: None | _ShapeLike = ...,
-        dtype: DTypeLike = ...,
-        out: None = ...,
-        ddof: float = ...,
-        keepdims: builtins.bool = ...,
+        axis: _ShapeLike | None = None,
+        dtype: DTypeLike | None = None,
+        out: None = None,
+        keepdims: builtins.bool = False,
         *,
-        where: _ArrayLikeBool_co = ...,
+        where: _ArrayLikeBool_co = True,
     ) -> Any: ...
     @overload
-    def var(
+    def mean(
         self,
-        axis: None | _ShapeLike = ...,
-        dtype: DTypeLike = ...,
-        out: _NdArraySubClass = ...,
-        ddof: float = ...,
-        keepdims: builtins.bool = ...,
+        /,
+        axis: _ShapeLike | None,
+        dtype: DTypeLike | None,
+        out: _ArrayT,
+        keepdims: builtins.bool = False,
         *,
-        where: _ArrayLikeBool_co = ...,
-    ) -> _NdArraySubClass: ...
-
-_DType = TypeVar("_DType", bound=dtype[Any])
-_DType_co = TypeVar("_DType_co", covariant=True, bound=dtype[Any])
-_FlexDType = TypeVar("_FlexDType", bound=dtype[flexible])
-
-_ShapeType_co = TypeVar("_ShapeType_co", covariant=True, bound=tuple[int, ...])
-_ShapeType2 = TypeVar("_ShapeType2", bound=tuple[int, ...])
-_Shape2DType_co = TypeVar("_Shape2DType_co", covariant=True, bound=tuple[int, int])
-_NumberType = TypeVar("_NumberType", bound=number[Any])
-
-if sys.version_info >= (3, 12):
-    from collections.abc import Buffer as _SupportsBuffer
-else:
-    _SupportsBuffer: TypeAlias = (
-        bytes
-        | bytearray
-        | memoryview
-        | _array.array[Any]
-        | mmap.mmap
-        | NDArray[Any]
-        | generic
-    )
-
-_T = TypeVar("_T")
-_T_co = TypeVar("_T_co", covariant=True)
-_T_contra = TypeVar("_T_contra", contravariant=True)
-_2Tuple: TypeAlias = tuple[_T, _T]
-_CastingKind: TypeAlias = L["no", "equiv", "safe", "same_kind", "unsafe"]
-
-_ArrayUInt_co: TypeAlias = NDArray[np.bool | unsignedinteger[Any]]
-_ArrayInt_co: TypeAlias = NDArray[np.bool | integer[Any]]
-_ArrayFloat_co: TypeAlias = NDArray[np.bool | integer[Any] | floating[Any]]
-_ArrayComplex_co: TypeAlias = NDArray[np.bool | integer[Any] | floating[Any] | complexfloating[Any, Any]]
-_ArrayNumber_co: TypeAlias = NDArray[np.bool | number[Any]]
-_ArrayTD64_co: TypeAlias = NDArray[np.bool | integer[Any] | timedelta64]
-
-# Introduce an alias for `dtype` to avoid naming conflicts.
-_dtype: TypeAlias = dtype[_ScalarType]
-
-if sys.version_info >= (3, 13):
-    from types import CapsuleType as _PyCapsule
-else:
-    _PyCapsule: TypeAlias = Any
-
-_ArrayAPIVersion: TypeAlias = L["2021.12", "2022.12", "2023.12"]
-
-class _SupportsItem(Protocol[_T_co]):
-    def item(self, args: Any, /) -> _T_co: ...
-
-class _SupportsReal(Protocol[_T_co]):
-    @property
-    def real(self) -> _T_co: ...
+        where: _ArrayLikeBool_co = True,
+    ) -> _ArrayT: ...
+    @overload
+    def mean(
+        self,
+        /,
+        axis: _ShapeLike | None = None,
+        dtype: DTypeLike | None = None,
+        *,
+        out: _ArrayT,
+        keepdims: builtins.bool = False,
+        where: _ArrayLikeBool_co = True,
+    ) -> _ArrayT: ...
 
-class _SupportsImag(Protocol[_T_co]):
-    @property
-    def imag(self) -> _T_co: ...
+    @overload
+    def std(
+        self,
+        axis: _ShapeLike | None = None,
+        dtype: DTypeLike | None = None,
+        out: None = None,
+        ddof: float = 0,
+        keepdims: builtins.bool = False,
+        *,
+        where: _ArrayLikeBool_co = True,
+        mean: _ArrayLikeNumber_co = ...,
+        correction: float = ...,
+    ) -> Any: ...
+    @overload
+    def std(
+        self,
+        axis: _ShapeLike | None,
+        dtype: DTypeLike | None,
+        out: _ArrayT,
+        ddof: float = 0,
+        keepdims: builtins.bool = False,
+        *,
+        where: _ArrayLikeBool_co = True,
+        mean: _ArrayLikeNumber_co = ...,
+        correction: float = ...,
+    ) -> _ArrayT: ...
+    @overload
+    def std(
+        self,
+        axis: _ShapeLike | None = None,
+        dtype: DTypeLike | None = None,
+        *,
+        out: _ArrayT,
+        ddof: float = 0,
+        keepdims: builtins.bool = False,
+        where: _ArrayLikeBool_co = True,
+        mean: _ArrayLikeNumber_co = ...,
+        correction: float = ...,
+    ) -> _ArrayT: ...
 
-class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]):
-    __hash__: ClassVar[None]
+    @overload
+    def var(
+        self,
+        axis: _ShapeLike | None = None,
+        dtype: DTypeLike | None = None,
+        out: None = None,
+        ddof: float = 0,
+        keepdims: builtins.bool = False,
+        *,
+        where: _ArrayLikeBool_co = True,
+        mean: _ArrayLikeNumber_co = ...,
+        correction: float = ...,
+    ) -> Any: ...
+    @overload
+    def var(
+        self,
+        axis: _ShapeLike | None,
+        dtype: DTypeLike | None,
+        out: _ArrayT,
+        ddof: float = 0,
+        keepdims: builtins.bool = False,
+        *,
+        where: _ArrayLikeBool_co = True,
+        mean: _ArrayLikeNumber_co = ...,
+        correction: float = ...,
+    ) -> _ArrayT: ...
+    @overload
+    def var(
+        self,
+        axis: _ShapeLike | None = None,
+        dtype: DTypeLike | None = None,
+        *,
+        out: _ArrayT,
+        ddof: float = 0,
+        keepdims: builtins.bool = False,
+        where: _ArrayLikeBool_co = True,
+        mean: _ArrayLikeNumber_co = ...,
+        correction: float = ...,
+    ) -> _ArrayT: ...
+
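A sketch of the newly annotated keyword-only arguments on `std`/`var`: `correction` is the array-API-style spelling of `ddof`, and `mean` lets a precomputed mean be reused. Both are keyword-only in these stubs:

    import numpy as np

    a = np.random.default_rng(0).normal(size=100)
    v = a.var(correction=1)                 # sample variance
    s = a.std(mean=a.mean(), correction=1)  # reuse the already-computed mean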
+class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]):
+    __hash__: ClassVar[None]  # type: ignore[assignment]  # pyright: ignore[reportIncompatibleMethodOverride]
     @property
     def base(self) -> None | NDArray[Any]: ...
     @property
@@ -1561,26 +2051,23 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]):
     @property
     def size(self) -> int: ...
     @property
-    def real(
-        self: ndarray[_ShapeType_co, dtype[_SupportsReal[_ScalarType]]],  # type: ignore[type-var]
-    ) -> ndarray[_ShapeType_co, _dtype[_ScalarType]]: ...
+    def real(self: _HasDTypeWithRealAndImag[_SCT, object], /) -> ndarray[_ShapeT_co, dtype[_SCT]]: ...
     @real.setter
-    def real(self, value: ArrayLike) -> None: ...
+    def real(self, value: ArrayLike, /) -> None: ...
     @property
-    def imag(
-        self: ndarray[_ShapeType_co, dtype[_SupportsImag[_ScalarType]]],  # type: ignore[type-var]
-    ) -> ndarray[_ShapeType_co, _dtype[_ScalarType]]: ...
+    def imag(self: _HasDTypeWithRealAndImag[object, _SCT], /) -> ndarray[_ShapeT_co, dtype[_SCT]]: ...
     @imag.setter
-    def imag(self, value: ArrayLike) -> None: ...
+    def imag(self, value: ArrayLike, /) -> None: ...
+
     def __new__(
-        cls: type[_ArraySelf],
+        cls,
         shape: _ShapeLike,
         dtype: DTypeLike = ...,
         buffer: None | _SupportsBuffer = ...,
         offset: SupportsIndex = ...,
         strides: None | _ShapeLike = ...,
         order: _OrderKACF = ...,
-    ) -> _ArraySelf: ...
+    ) -> Self: ...
 
     if sys.version_info >= (3, 12):
         def __buffer__(self, flags: int, /) -> memoryview: ...
@@ -1590,11 +2077,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]):
     @overload
     def __array__(
         self, dtype: None = ..., /, *, copy: None | bool = ...
-    ) -> ndarray[_ShapeType_co, _DType_co]: ...
+    ) -> ndarray[_ShapeT_co, _DType_co]: ...
     @overload
     def __array__(
         self, dtype: _DType, /, *, copy: None | bool = ...
-    ) -> ndarray[_ShapeType_co, _DType]: ...
+    ) -> ndarray[_ShapeT_co, _DType]: ...
 
     def __array_ufunc__(
         self,
@@ -1619,61 +2106,108 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]):
     def __array_wrap__(
         self,
-        array: ndarray[_ShapeType2, _DType],
+        array: ndarray[_ShapeT, _DType],
         context: None | tuple[ufunc, tuple[Any, ...], int] = ...,
         return_scalar: builtins.bool = ...,
         /,
-    ) -> ndarray[_ShapeType2, _DType]: ...
+    ) -> ndarray[_ShapeT, _DType]: ...
 
     @overload
-    def __getitem__(self, key: (
-        NDArray[integer[Any]]
-        | NDArray[np.bool]
-        | tuple[NDArray[integer[Any]] | NDArray[np.bool], ...]
-    )) -> ndarray[Any, _DType_co]: ...
+    def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> ndarray[_Shape, _DType_co]: ...
     @overload
-    def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...]) -> Any: ...
+    def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ...
     @overload
-    def __getitem__(self, key: (
-        None
-        | slice
-        | ellipsis
-        | SupportsIndex
-        | _ArrayLikeInt_co
-        | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...]
-    )) -> ndarray[Any, _DType_co]: ...
+    def __getitem__(self, key: _ToIndices, /) -> ndarray[_Shape, _DType_co]: ...
     @overload
-    def __getitem__(self: NDArray[void], key: str) -> NDArray[Any]: ...
+    def __getitem__(self: NDArray[void], key: str, /) -> ndarray[_ShapeT_co, np.dtype[Any]]: ...
     @overload
-    def __getitem__(self: NDArray[void], key: list[str]) -> ndarray[_ShapeType_co, _dtype[void]]: ...
+    def __getitem__(self: NDArray[void], key: list[str], /) -> ndarray[_ShapeT_co, _dtype[void]]: ...
+
+    @overload  # flexible | object_ | bool
+    def __setitem__(
+        self: ndarray[Any, dtype[flexible | object_ | np.bool] | dtypes.StringDType],
+        key: _ToIndices,
+        value: object,
+        /,
+    ) -> None: ...
+    @overload  # integer
+    def __setitem__(
+        self: NDArray[integer],
+        key: _ToIndices,
+        value: _ConvertibleToInt | _NestedSequence[_ConvertibleToInt] | _ArrayLikeInt_co,
+        /,
+    ) -> None: ...
+    @overload  # floating
+    def __setitem__(
+        self: NDArray[floating],
+        key: _ToIndices,
+        value: _ConvertibleToFloat | _NestedSequence[_ConvertibleToFloat | None] | _ArrayLikeFloat_co | None,
+        /,
+    ) -> None: ...
+    @overload  # complexfloating
+    def __setitem__(
+        self: NDArray[complexfloating],
+        key: _ToIndices,
+        value: _ConvertibleToComplex | _NestedSequence[_ConvertibleToComplex | None] | _ArrayLikeNumber_co | None,
+        /,
+    ) -> None: ...
+    @overload  # timedelta64
+    def __setitem__(
+        self: NDArray[timedelta64],
+        key: _ToIndices,
+        value: _ConvertibleToTD64 | _NestedSequence[_ConvertibleToTD64],
+        /,
+    ) -> None: ...
+    @overload  # datetime64
+    def __setitem__(
+        self: NDArray[datetime64],
+        key: _ToIndices,
+        value: _ConvertibleToDT64 | _NestedSequence[_ConvertibleToDT64],
+        /,
+    ) -> None: ...
+    @overload  # void
+    def __setitem__(self: NDArray[void], key: str | list[str], value: object, /) -> None: ...
+    @overload  # catch-all
+    def __setitem__(self, key: _ToIndices, value: ArrayLike, /) -> None: ...
 
     @property
     def ctypes(self) -> _ctypes[int]: ...
     @property
-    def shape(self) -> _ShapeType_co: ...
+    def shape(self) -> _ShapeT_co: ...
     @shape.setter
     def shape(self, value: _ShapeLike) -> None: ...
     @property
     def strides(self) -> _Shape: ...
     @strides.setter
     def strides(self, value: _ShapeLike) -> None: ...
-    def byteswap(self: _ArraySelf, inplace: builtins.bool = ...) -> _ArraySelf: ...
+    def byteswap(self, inplace: builtins.bool = ...) -> Self: ...
     def fill(self, value: Any) -> None: ...
     @property
-    def flat(self: _NdArraySubClass) -> flatiter[_NdArraySubClass]: ...
+    def flat(self) -> flatiter[Self]: ...
 
-    # Use the same output type as that of the underlying `generic`
+    @overload  # special casing for `StringDType`, which has no scalar type
+    def item(self: ndarray[Any, dtypes.StringDType], /) -> str: ...
     @overload
-    def item(
-        self: ndarray[Any, _dtype[_SupportsItem[_T]]],  # type: ignore[type-var]
-        *args: SupportsIndex,
-    ) -> _T: ...
+    def item(self: ndarray[Any, dtypes.StringDType], arg0: SupportsIndex | tuple[SupportsIndex, ...] = ..., /) -> str: ...
     @overload
-    def item(
-        self: ndarray[Any, _dtype[_SupportsItem[_T]]],  # type: ignore[type-var]
-        args: tuple[SupportsIndex, ...],
-        /,
-    ) -> _T: ...
+    def item(self: ndarray[Any, dtypes.StringDType], /, *args: SupportsIndex) -> str: ...
+    @overload  # use the same output type as that of the underlying `generic`
+    def item(self: _HasShapeAndDTypeWithItem[Any, _T], /) -> _T: ...
+    @overload
+    def item(self: _HasShapeAndDTypeWithItem[Any, _T], arg0: SupportsIndex | tuple[SupportsIndex, ...] = ..., /) -> _T: ...
+    @overload
+    def item(self: _HasShapeAndDTypeWithItem[Any, _T], /, *args: SupportsIndex) -> _T: ...
+
+    @overload
+    def tolist(self: _HasShapeAndSupportsItem[tuple[()], _T], /) -> _T: ...
+    @overload
+    def tolist(self: _HasShapeAndSupportsItem[tuple[int], _T], /) -> list[_T]: ...
+    @overload
+    def tolist(self: _HasShapeAndSupportsItem[tuple[int, int], _T], /) -> list[list[_T]]: ...
+    @overload
+    def tolist(self: _HasShapeAndSupportsItem[tuple[int, int, int], _T], /) -> list[list[list[_T]]]: ...
+    @overload
+    def tolist(self: _HasShapeAndSupportsItem[Any, _T], /) -> _T | list[_T] | list[list[_T]] | list[list[list[Any]]]: ...
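With the shape-parameterized `item`/`tolist` overloads above, the nesting of the returned lists can track the array's number of dimensions; a sketch, assuming a statically known shape type:

    import numpy as np

    v = np.zeros(3)
    m = np.zeros((2, 2))
    reveal_type(v.tolist())    # list[float]
    reveal_type(m.tolist())    # list[list[float]]
    reveal_type(m.item(0, 0))  # float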
     @overload
     def resize(self, new_shape: _ShapeLike, /, *, refcheck: builtins.bool = ...) -> None: ...
@@ -1687,18 +2221,92 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]):
     def squeeze(
         self,
         axis: None | SupportsIndex | tuple[SupportsIndex, ...] = ...,
-    ) -> ndarray[Any, _DType_co]: ...
+    ) -> ndarray[_Shape, _DType_co]: ...
 
     def swapaxes(
         self,
         axis1: SupportsIndex,
         axis2: SupportsIndex,
-    ) -> ndarray[Any, _DType_co]: ...
+    ) -> ndarray[_Shape, _DType_co]: ...
 
     @overload
-    def transpose(self: _ArraySelf, axes: None | _ShapeLike, /) -> _ArraySelf: ...
+    def transpose(self, axes: None | _ShapeLike, /) -> Self: ...
+    @overload
+    def transpose(self, *axes: SupportsIndex) -> Self: ...
+
+    @overload
+    def all(
+        self,
+        axis: None = None,
+        out: None = None,
+        keepdims: L[False, 0] = False,
+        *,
+        where: _ArrayLikeBool_co = True
+    ) -> np.bool: ...
+    @overload
+    def all(
+        self,
+        axis: None | int | tuple[int, ...] = None,
+        out: None = None,
+        keepdims: SupportsIndex = False,
+        *,
+        where: _ArrayLikeBool_co = True,
+    ) -> np.bool | NDArray[np.bool]: ...
+    @overload
+    def all(
+        self,
+        axis: None | int | tuple[int, ...],
+        out: _ArrayT,
+        keepdims: SupportsIndex = False,
+        *,
+        where: _ArrayLikeBool_co = True,
+    ) -> _ArrayT: ...
+    @overload
+    def all(
+        self,
+        axis: None | int | tuple[int, ...] = None,
+        *,
+        out: _ArrayT,
+        keepdims: SupportsIndex = False,
+        where: _ArrayLikeBool_co = True,
+    ) -> _ArrayT: ...
+
+    @overload
+    def any(
+        self,
+        axis: None = None,
+        out: None = None,
+        keepdims: L[False, 0] = False,
+        *,
+        where: _ArrayLikeBool_co = True
+    ) -> np.bool: ...
+    @overload
+    def any(
+        self,
+        axis: None | int | tuple[int, ...] = None,
+        out: None = None,
+        keepdims: SupportsIndex = False,
+        *,
+        where: _ArrayLikeBool_co = True,
+    ) -> np.bool | NDArray[np.bool]: ...
+    @overload
+    def any(
+        self,
+        axis: None | int | tuple[int, ...],
+        out: _ArrayT,
+        keepdims: SupportsIndex = False,
+        *,
+        where: _ArrayLikeBool_co = True,
+    ) -> _ArrayT: ...
     @overload
-    def transpose(self: _ArraySelf, *axes: SupportsIndex) -> _ArraySelf: ...
+    def any(
+        self,
+        axis: None | int | tuple[int, ...] = None,
+        *,
+        out: _ArrayT,
+        keepdims: SupportsIndex = False,
+        where: _ArrayLikeBool_co = True,
+    ) -> _ArrayT: ...
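The `all`/`any` overloads now distinguish the no-axis case, which returns a scalar `np.bool`, from the axis case, which may return either a scalar or an array; a sketch:

    import numpy as np

    a = np.eye(2, dtype=bool)
    reveal_type(a.all())        # np.bool
    reveal_type(a.all(axis=0))  # np.bool | ndarray[..., dtype[np.bool]]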
     def argpartition(
         self,
@@ -1713,7 +2321,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]):
         offset: SupportsIndex = ...,
         axis1: SupportsIndex = ...,
         axis2: SupportsIndex = ...,
-    ) -> ndarray[Any, _DType_co]: ...
+    ) -> ndarray[_Shape, _DType_co]: ...
 
     # 1D + 1D returns a scalar;
     # all other with at least 1 non-0D array return an ndarray.
@@ -1722,7 +2330,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]):
     @overload
     def dot(self, b: ArrayLike, out: None = ...) -> Any: ...  # type: ignore[misc]
     @overload
-    def dot(self, b: ArrayLike, out: _NdArraySubClass) -> _NdArraySubClass: ...
+    def dot(self, b: ArrayLike, out: _ArrayT) -> _ArrayT: ...
 
     # `nonzero()` is deprecated for 0d arrays/generics
     def nonzero(self) -> tuple[NDArray[intp], ...]: ...
@@ -1791,17 +2399,17 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]):
         axis1: SupportsIndex = ...,
         axis2: SupportsIndex = ...,
         dtype: DTypeLike = ...,
-        out: _NdArraySubClass = ...,
-    ) -> _NdArraySubClass: ...
+        out: _ArrayT = ...,
+    ) -> _ArrayT: ...
 
     @overload
     def take(  # type: ignore[misc]
-        self: NDArray[_ScalarType],
+        self: NDArray[_SCT],
         indices: _IntLike_co,
         axis: None | SupportsIndex = ...,
         out: None = ...,
         mode: _ModeKind = ...,
-    ) -> _ScalarType: ...
+    ) -> _SCT: ...
     @overload
     def take(  # type: ignore[misc]
         self,
@@ -1809,60 +2417,116 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]):
         axis: None | SupportsIndex = ...,
         out: None = ...,
         mode: _ModeKind = ...,
-    ) -> ndarray[Any, _DType_co]: ...
+    ) -> ndarray[_Shape, _DType_co]: ...
     @overload
     def take(
         self,
         indices: _ArrayLikeInt_co,
         axis: None | SupportsIndex = ...,
-        out: _NdArraySubClass = ...,
+        out: _ArrayT = ...,
         mode: _ModeKind = ...,
-    ) -> _NdArraySubClass: ...
+    ) -> _ArrayT: ...
 
     def repeat(
         self,
         repeats: _ArrayLikeInt_co,
         axis: None | SupportsIndex = ...,
-    ) -> ndarray[Any, _DType_co]: ...
+    ) -> ndarray[_Shape, _DType_co]: ...
 
-    # TODO: use `tuple[int]` as shape type once covariant (#26081)
-    def flatten(
-        self,
-        order: _OrderKACF = ...,
-    ) -> ndarray[Any, _DType_co]: ...
+    def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DType_co]: ...
+    def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DType_co]: ...
 
-    # TODO: use `tuple[int]` as shape type once covariant (#26081)
-    def ravel(
+    # NOTE: reshape also accepts negative integers, so we can't use integer literals
+    @overload  # (None)
+    def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: builtins.bool | None = None) -> Self: ...
+    @overload  # (empty_sequence)
+    def reshape(  # type: ignore[overload-overlap]  # mypy false positive
         self,
-        order: _OrderKACF = ...,
-    ) -> ndarray[Any, _DType_co]: ...
-
-    @overload
+        shape: Sequence[Never],
+        /,
+        *,
+        order: _OrderACF = "C",
+        copy: builtins.bool | None = None,
+    ) -> ndarray[tuple[()], _DType_co]: ...
+    @overload  # (() | (int) | (int, int) | ....)  # up to 8-d
     def reshape(
         self,
-        shape: _ShapeLike,
+        shape: _AnyShapeType,
         /,
         *,
-        order: _OrderACF = ...,
-        copy: None | bool = ...,
-    ) -> ndarray[Any, _DType_co]: ...
-    @overload
+        order: _OrderACF = "C",
+        copy: builtins.bool | None = None,
+    ) -> ndarray[_AnyShapeType, _DType_co]: ...
+    @overload  # (index)
+    def reshape(
+        self,
+        size1: SupportsIndex,
+        /,
+        *,
+        order: _OrderACF = "C",
+        copy: builtins.bool | None = None,
+    ) -> ndarray[tuple[int], _DType_co]: ...
+    @overload  # (index, index)
+    def reshape(
+        self,
+        size1: SupportsIndex,
+        size2: SupportsIndex,
+        /,
+        *,
+        order: _OrderACF = "C",
+        copy: builtins.bool | None = None,
+    ) -> ndarray[tuple[int, int], _DType_co]: ...
+    @overload  # (index, index, index)
+    def reshape(
+        self,
+        size1: SupportsIndex,
+        size2: SupportsIndex,
+        size3: SupportsIndex,
+        /,
+        *,
+        order: _OrderACF = "C",
+        copy: builtins.bool | None = None,
+    ) -> ndarray[tuple[int, int, int], _DType_co]: ...
+    @overload  # (index, index, index, index)
+    def reshape(
+        self,
+        size1: SupportsIndex,
+        size2: SupportsIndex,
+        size3: SupportsIndex,
+        size4: SupportsIndex,
+        /,
+        *,
+        order: _OrderACF = "C",
+        copy: builtins.bool | None = None,
+    ) -> ndarray[tuple[int, int, int, int], _DType_co]: ...
+    @overload  # (int, *(index, ...))
     def reshape(
         self,
+        size0: SupportsIndex,
+        /,
         *shape: SupportsIndex,
-        order: _OrderACF = ...,
-        copy: None | bool = ...,
-    ) -> ndarray[Any, _DType_co]: ...
+        order: _OrderACF = "C",
+        copy: builtins.bool | None = None,
+    ) -> ndarray[_Shape, _DType_co]: ...
+    @overload  # (sequence[index])
+    def reshape(
+        self,
+        shape: Sequence[SupportsIndex],
+        /,
+        *,
+        order: _OrderACF = "C",
+        copy: builtins.bool | None = None,
+    ) -> ndarray[_Shape, _DType_co]: ...
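The per-arity `reshape` overloads let the result's shape type follow the argument count; a sketch, assuming mypy-style reveals:

    import numpy as np

    a = np.arange(6)
    reveal_type(a.reshape(2, 3))  # ndarray[tuple[int, int], dtype[...]]
    reveal_type(a.reshape(-1))    # ndarray[tuple[int], dtype[...]]  (negative ints are fine)
    reveal_type(a.flatten())      # ndarray[tuple[int], dtype[...]]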
     @overload
     def astype(
         self,
-        dtype: _DTypeLike[_ScalarType],
+        dtype: _DTypeLike[_SCT],
         order: _OrderKACF = ...,
         casting: _CastingKind = ...,
         subok: builtins.bool = ...,
         copy: builtins.bool | _CopyMode = ...,
-    ) -> NDArray[_ScalarType]: ...
+    ) -> ndarray[_ShapeT_co, dtype[_SCT]]: ...
     @overload
     def astype(
         self,
@@ -1871,29 +2535,29 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]):
         casting: _CastingKind = ...,
         subok: builtins.bool = ...,
         copy: builtins.bool | _CopyMode = ...,
-    ) -> NDArray[Any]: ...
+    ) -> ndarray[_ShapeT_co, dtype[Any]]: ...
 
     @overload
-    def view(self: _ArraySelf) -> _ArraySelf: ...
+    def view(self) -> Self: ...
     @overload
-    def view(self, type: type[_NdArraySubClass]) -> _NdArraySubClass: ...
+    def view(self, type: type[_ArrayT]) -> _ArrayT: ...
     @overload
-    def view(self, dtype: _DTypeLike[_ScalarType]) -> NDArray[_ScalarType]: ...
+    def view(self, dtype: _DTypeLike[_SCT]) -> NDArray[_SCT]: ...
     @overload
     def view(self, dtype: DTypeLike) -> NDArray[Any]: ...
     @overload
     def view(
         self,
         dtype: DTypeLike,
-        type: type[_NdArraySubClass],
-    ) -> _NdArraySubClass: ...
+        type: type[_ArrayT],
+    ) -> _ArrayT: ...
 
     @overload
     def getfield(
         self,
-        dtype: _DTypeLike[_ScalarType],
+        dtype: _DTypeLike[_SCT],
         offset: SupportsIndex = ...
-    ) -> NDArray[_ScalarType]: ...
+    ) -> NDArray[_SCT]: ...
     @overload
     def getfield(
         self,
@@ -1901,27 +2565,22 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]):
         dtype: DTypeLike,
         offset: SupportsIndex = ...
     ) -> NDArray[Any]: ...
 
-    # Dispatch to the underlying `generic` via protocols
-    def __int__(
-        self: NDArray[SupportsInt],  # type: ignore[type-var]
-    ) -> int: ...
-
-    def __float__(
-        self: NDArray[SupportsFloat],  # type: ignore[type-var]
-    ) -> float: ...
-
-    def __complex__(
-        self: NDArray[SupportsComplex],  # type: ignore[type-var]
-    ) -> complex: ...
-
-    def __index__(
-        self: NDArray[SupportsIndex],  # type: ignore[type-var]
-    ) -> int: ...
+    def __index__(self: NDArray[np.integer[Any]], /) -> int: ...
+    def __int__(self: NDArray[number[Any] | np.timedelta64 | np.bool | object_], /) -> int: ...
+    def __float__(self: NDArray[number[Any] | np.timedelta64 | np.bool | object_], /) -> float: ...
+    def __complex__(self: NDArray[number[Any] | np.bool | object_], /) -> complex: ...
 
     def __len__(self) -> int: ...
-    def __setitem__(self, key, value): ...
-    def __iter__(self) -> Any: ...
-    def __contains__(self, key) -> builtins.bool: ...
+    def __contains__(self, value: object, /) -> builtins.bool: ...
+
+    @overload  # == 1-d & object_
+    def __iter__(self: ndarray[tuple[int], dtype[object_]], /) -> Iterator[Any]: ...
+    @overload  # == 1-d
+    def __iter__(self: ndarray[tuple[int], dtype[_SCT]], /) -> Iterator[_SCT]: ...
+    @overload  # >= 2-d
+    def __iter__(self: ndarray[tuple[int, int, Unpack[tuple[int, ...]]], dtype[_SCT]], /) -> Iterator[NDArray[_SCT]]: ...
+    @overload  # ?-d
+    def __iter__(self, /) -> Iterator[Any]: ...
 
     # The last overload is for catching recursive objects whose
     # nesting is too deep.
@@ -1974,157 +2633,208 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]):
     def __ge__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ...
 
     # Unary ops
-    @overload
-    def __abs__(self: NDArray[_UnknownType]) -> NDArray[Any]: ...
-    @overload
-    def __abs__(self: NDArray[np.bool]) -> NDArray[np.bool]: ...
-    @overload
-    def __abs__(self: NDArray[complexfloating[_NBit1, _NBit1]]) -> NDArray[floating[_NBit1]]: ...
-    @overload
-    def __abs__(self: NDArray[_NumberType]) -> NDArray[_NumberType]: ...
-    @overload
-    def __abs__(self: NDArray[timedelta64]) -> NDArray[timedelta64]: ...
-    @overload
-    def __abs__(self: NDArray[object_]) -> Any: ...
+    # TODO: Uncomment once https://github.com/python/mypy/issues/14070 is fixed
+    # @overload
+    # def __abs__(self: ndarray[_ShapeType, dtypes.Complex64DType], /) -> ndarray[_ShapeType, dtypes.Float32DType]: ...
+    # @overload
+    # def __abs__(self: ndarray[_ShapeType, dtypes.Complex128DType], /) -> ndarray[_ShapeType, dtypes.Float64DType]: ...
+    # @overload
+    # def __abs__(self: ndarray[_ShapeType, dtypes.CLongDoubleDType], /) -> ndarray[_ShapeType, dtypes.LongDoubleDType]: ...
+    # @overload
+    # def __abs__(self: ndarray[_ShapeType, dtype[complex128]], /) -> ndarray[_ShapeType, dtype[float64]]: ...
     @overload
-    def __invert__(self: NDArray[_UnknownType]) -> NDArray[Any]: ...
-    @overload
-    def __invert__(self: NDArray[np.bool]) -> NDArray[np.bool]: ...
+    def __abs__(
+        self: ndarray[_ShapeT, dtype[complexfloating[_AnyNBitInexact]]], /
+    ) -> ndarray[_ShapeT, dtype[floating[_AnyNBitInexact]]]: ...
     @overload
-    def __invert__(self: NDArray[_IntType]) -> NDArray[_IntType]: ...
-    @overload
-    def __invert__(self: NDArray[object_]) -> Any: ...
+    def __abs__(self: _RealArrayT, /) -> _RealArrayT: ...
+
+    def __invert__(self: _IntegralArrayT, /) -> _IntegralArrayT: ...  # noqa: PYI019
+    def __neg__(self: _NumericArrayT, /) -> _NumericArrayT: ...  # noqa: PYI019
+    def __pos__(self: _NumericArrayT, /) -> _NumericArrayT: ...  # noqa: PYI019
+
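The unary operators are now self-typed: `abs()` of a complex array yields a floating array of matching precision, while `~` is restricted to arrays that actually support it; a sketch:

    import numpy as np

    c = np.zeros(3, dtype=np.complex128)
    reveal_type(abs(c))  # a floating-dtype array
    i = np.zeros(3, dtype=np.int64)
    reveal_type(~i)      # the same integer array type
    # ~np.zeros(3)       # rejected: float arrays don't satisfy `_IntegralArrayT`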
+    # Binary ops
+
+    # TODO: Support the "1d @ 1d -> scalar" case
     @overload
-    def __pos__(self: NDArray[_NumberType]) -> NDArray[_NumberType]: ...
+    def __matmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ...
     @overload
-    def __pos__(self: NDArray[timedelta64]) -> NDArray[timedelta64]: ...
+    def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...  # type: ignore[overload-overlap]
     @overload
-    def __pos__(self: NDArray[object_]) -> Any: ...
-
+    def __matmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ...  # type: ignore[overload-overlap]
     @overload
-    def __neg__(self: NDArray[_NumberType]) -> NDArray[_NumberType]: ...
+    def __matmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
     @overload
-    def __neg__(self: NDArray[timedelta64]) -> NDArray[timedelta64]: ...
+    def __matmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
     @overload
-    def __neg__(self: NDArray[object_]) -> Any: ...
-
-    # Binary ops
-    @overload
-    def __matmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
+    def __matmul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ...
     @overload
-    def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...  # type: ignore[misc]
+    def __matmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
     @overload
-    def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+    def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...  # type: ignore[overload-overlap]
     @overload
-    def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
+    def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...  # type: ignore[overload-overlap]
     @overload
-    def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
+    def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...  # type: ignore[overload-overlap]
     @overload
-    def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ...
+    def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ...
     @overload
-    def __matmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ...
+    def __matmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ...
     @overload
     def __matmul__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
     def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
 
+    @overload  # signature equivalent to __matmul__
+    def __rmatmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ...
+    @overload
+    def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rmatmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ...  # type: ignore[overload-overlap]
     @overload
-    def __rmatmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
+    def __rmatmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
     @overload
-    def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...  # type: ignore[misc]
+    def __rmatmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
     @overload
-    def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+    def __rmatmul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ...
     @overload
-    def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
+    def __rmatmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
     @overload
-    def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
+    def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[overload-overlap]
     @overload
+    def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...  # type: ignore[overload-overlap]
     @overload
+    def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ...  # type: ignore[overload-overlap]
     @overload
     def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ...
     @overload
-    def __rmatmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ...
+    def __rmatmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ...
     @overload
     def __rmatmul__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
     def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
 
     @overload
-    def __mod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
+    def __mod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ...
+    @overload
+    def __mod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ...  # type: ignore[overload-overlap]
     @overload
-    def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ...  # type: ignore[misc]
+    def __mod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ...  # type: ignore[overload-overlap]
     @overload
-    def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+    def __mod__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
     @overload
-    def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
+    def __mod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
     @overload
-    def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
+    def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...  # type: ignore[overload-overlap]
     @overload
-    def __mod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[timedelta64]: ...
+    def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
+    @overload
+    def __mod__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ...
     @overload
     def __mod__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
     def __mod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
 
+    @overload  # signature equivalent to __mod__
+    def __rmod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ...
+    @overload
+    def __rmod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ...  # type: ignore[overload-overlap]
     @overload
-    def __rmod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
+    def __rmod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ...  # type: ignore[overload-overlap]
     @overload
-    def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ...  # type: ignore[misc]
+    def __rmod__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
     @overload
-    def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+    def __rmod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
     @overload
-    def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
+    def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...  # type: ignore[overload-overlap]
     @overload
-    def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
+    def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...  # type: ignore[overload-overlap]
     @overload
-    def __rmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[timedelta64]: ...
+    def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
+    @overload
+    def __rmod__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ...
     @overload
     def __rmod__(self: NDArray[object_], other: Any, /) -> Any: ...
     @overload
     def __rmod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
 
     @overload
-    def __divmod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> _2Tuple[NDArray[Any]]: ...
+    def __divmod__(self: NDArray[_RealNumberT], rhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ...
+    @overload
+    def __divmod__(self: NDArray[_RealNumberT], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __divmod__(self: NDArray[np.bool], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __divmod__(self: NDArray[np.bool], rhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ...  # type: ignore[overload-overlap]
     @overload
-    def __divmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> _2Tuple[NDArray[int8]]: ...  # type: ignore[misc]
+    def __divmod__(self: NDArray[floating[_64Bit]], rhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ...
     @overload
-    def __divmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ...  # type: ignore[misc]
+    def __divmod__(self: _ArrayFloat64_co, rhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ...
     @overload
-    def __divmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger[Any]]]: ...  # type: ignore[misc]
+    def __divmod__(self: _ArrayUInt_co, rhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ...  # type: ignore[overload-overlap]
     @overload
-    def __divmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating[Any]]]: ...  # type: ignore[misc]
+    def __divmod__(self: _ArrayInt_co, rhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ...  # type: ignore[overload-overlap]
     @overload
-    def __divmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ...
+    def __divmod__(self: _ArrayFloat_co, rhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ...
+    @overload
+    def __divmod__(self: NDArray[timedelta64], rhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ...
+
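A sketch of the `divmod` annotations above; note the timedelta64 case, where the quotient is integral but the remainder keeps the time unit:

    import numpy as np

    t = np.array([10, 20], dtype="timedelta64[s]")
    d = np.array([3], dtype="timedelta64[s]")
    q, r = divmod(t, d)  # (NDArray[int64], NDArray[timedelta64])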
+    @overload  # signature equivalent to __divmod__
+    def __rdivmod__(self: NDArray[_RealNumberT], lhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ...
+    @overload
+    def __rdivmod__(self: NDArray[_RealNumberT], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ...  # type: ignore[overload-overlap]
     @overload
-    def __rdivmod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> _2Tuple[NDArray[Any]]: ...
+    def __rdivmod__(self: NDArray[floating[_64Bit]], lhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ...
     @overload
-    def __rdivmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ...  # type: ignore[misc]
+    def __rdivmod__(self: _ArrayFloat64_co, lhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ...
     @overload
-    def __rdivmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ...  # type: ignore[misc]
+    def __rdivmod__(self: _ArrayUInt_co, lhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ...  # type: ignore[overload-overlap]
     @overload
-    def __rdivmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger[Any]]]: ...  # type: ignore[misc]
+    def __rdivmod__(self: _ArrayInt_co, lhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ...  # type: ignore[overload-overlap]
     @overload
-    def __rdivmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating[Any]]]: ...  # type: ignore[misc]
+    def __rdivmod__(self: _ArrayFloat_co, lhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ...
     @overload
-    def __rdivmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ...
+    def __rdivmod__(self: NDArray[timedelta64], lhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ...
 
     @overload
-    def __add__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ...
+    def __add__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ...
+    @overload
+    def __add__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ...  # type: ignore[overload-overlap]
     @overload
-    def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...  # type: ignore[misc]
+    def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...  # type: ignore[overload-overlap]
     @overload
-    def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+    def __add__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ...  # type: ignore[overload-overlap]
     @overload
-    def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
+    def __add__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
     @overload
-    def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
+    def __add__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
     @overload
-    def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ...  # type: ignore[misc]
+    def __add__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ...
     @overload
-    def __add__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ...
+    def __add__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
+    @overload
+    def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ...  # type: ignore[overload-overlap]
+    @overload
+    def __add__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ...  # type: ignore[overload-overlap]
     @overload
-    def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ...  # type: ignore[misc]
+    def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ...
     @overload
     def __add__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ...
     @overload
@@ -2134,22 +2844,34 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]):
     @overload
     def __add__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
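The leading `int | np.bool` overloads above are what keep a plain Python-int operand from widening the array dtype; a sketch:

    import numpy as np

    f32 = np.zeros(3, dtype=np.float32)
    reveal_type(f32 + 2)    # NDArray[float32] -- dtype preserved
    f64 = np.zeros(3)
    reveal_type(f64 + 1.5)  # NDArray[float64], via `_ArrayLikeFloat64_co`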
@overload - def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __radd__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __radd__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... # type: ignore[misc] + def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload def __radd__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... @overload @@ -2160,21 +2882,33 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]): def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __sub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... + def __sub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __sub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload def __sub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __sub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __sub__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __sub__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __sub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... 
# type: ignore[misc] + def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] @overload - def __sub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] @overload - def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... # type: ignore[misc] + def __sub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload def __sub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... @overload @@ -2185,23 +2919,35 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]): def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rsub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... + def __rsub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rsub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload def __rsub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rsub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __rsub__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rsub__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rsub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __rsub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... # type: ignore[misc] + def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] @overload - def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... # type: ignore[misc] + def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... 
# type: ignore[overload-overlap] + @overload + def __rsub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + @overload + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... @overload def __rsub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[timedelta64]: ... @overload @@ -2210,175 +2956,253 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]): def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __mul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... + def __mul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __mul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] @overload - def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __mul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __mul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __mul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __mul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __mul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __mul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __mul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __mul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __mul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... 
+ @overload + def __mul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... @overload def __mul__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + @overload # signature equivalent to __mul__ + def __rmul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __rmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... + def __rmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] @overload - def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __rmul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __rmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __rmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __rmul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __rmul__(self: NDArray[object_], other: Any, /) -> Any: ... + def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] @overload - def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - + def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] @overload - def __floordiv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... + def __rmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload - def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __rmul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... 
@overload - def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rmul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... @overload - def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rmul__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + @overload - def __floordiv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[int64]: ... + def __truediv__(self: _ArrayInt_co, other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + def __truediv__(self: _ArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + def __truediv__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __floordiv__(self: NDArray[object_], other: Any, /) -> Any: ... + def __truediv__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - + def __truediv__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __rfloordiv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... + def __truediv__(self: _ArrayFloat_co, other: _ArrayLike[floating], /) -> NDArray[floating]: ... @overload - def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __truediv__(self: NDArray[complexfloating], other: _ArrayLikeNumber_co, /) -> NDArray[complexfloating]: ... @overload - def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __truediv__(self: _ArrayNumber_co, other: _ArrayLike[complexfloating], /) -> NDArray[complexfloating]: ... @overload - def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __truediv__(self: NDArray[inexact], other: _ArrayLikeNumber_co, /) -> NDArray[inexact]: ... @overload - def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __truediv__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload - def __rfloordiv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[int64]: ... + def __truediv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[float64]: ... @overload - def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeTD64_co, /) -> NoReturn: ... + def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... 
+ def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... @overload - def __rfloordiv__(self: NDArray[object_], other: Any, /) -> Any: ... + def __truediv__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __pow__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... + def __rtruediv__(self: _ArrayInt_co, other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __rtruediv__(self: _ArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rtruediv__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rtruediv__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rtruediv__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... + def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLike[floating], /) -> NDArray[floating]: ... @overload - def __pow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __rtruediv__(self: NDArray[complexfloating], other: _ArrayLikeNumber_co, /) -> NDArray[complexfloating]: ... @overload - def __pow__(self: NDArray[object_], other: Any, /) -> Any: ... + def __rtruediv__(self: _ArrayNumber_co, other: _ArrayLike[complexfloating], /) -> NDArray[complexfloating]: ... @overload - def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - + def __rtruediv__(self: NDArray[inexact], other: _ArrayLikeNumber_co, /) -> NDArray[inexact]: ... @overload - def __rpow__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... + def __rtruediv__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload - def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __rtruediv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[float64]: ... @overload - def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rtruediv__(self: NDArray[integer | floating], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... @overload - def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rtruediv__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
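Taken together, the `__truediv__`/`__rtruediv__` overloads above encode the true-division promotion rules: integer operands promote to `float64`, the ratio of two `timedelta64` arrays is a dimensionless `float64`, dividing a `timedelta64` array by a boolean is rejected outright via `NoReturn`, and dividing it by other real numbers keeps `timedelta64`. A minimal sketch of the inference a type checker (e.g. mypy) should produce under these stubs; the variables are illustrative only:

    import numpy as np
    from typing import reveal_type
    from numpy.typing import NDArray

    ints: NDArray[np.int32] = np.array([4, 6], dtype=np.int32)
    tds: NDArray[np.timedelta64] = np.array([4, 6], dtype="m8[s]")

    reveal_type(ints / ints)  # inferred: NDArray[np.float64] (int / int gives float64)
    reveal_type(tds / tds)    # inferred: NDArray[np.float64] (a dimensionless ratio)
    reveal_type(tds / 2.0)    # inferred: NDArray[np.timedelta64]
    tds / True                # flagged: the `_ArrayLikeBool_co` overload returns NoReturn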
+ @overload - def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... + def __floordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... @overload - def __rpow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __floordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: NDArray[object_], other: Any, /) -> Any: ... + def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - + def __floordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] @overload - def __truediv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... + def __floordiv__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __truediv__(self: _ArrayInt_co, other: _ArrayInt_co, /) -> NDArray[float64]: ... # type: ignore[misc] + def __floordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __truediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __truediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __truediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __truediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[float64]: ... + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[int64]: ... @overload - def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... @overload - def __truediv__(self: NDArray[object_], other: Any, /) -> Any: ... + def __floordiv__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rtruediv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... + def __rfloordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... @overload - def __rtruediv__(self: _ArrayInt_co, other: _ArrayInt_co, /) -> NDArray[float64]: ... 
# type: ignore[misc] + def __rfloordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] @overload - def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] @overload - def __rtruediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] @overload - def __rtruediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __rfloordiv__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __rtruediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[float64]: ... + def __rfloordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rtruediv__(self: NDArray[np.bool], other: _ArrayLikeTD64_co, /) -> NoReturn: ... + def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __rtruediv__(self: NDArray[object_], other: Any, /) -> Any: ... + def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] @overload - def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __rfloordiv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[int64]: ... + @overload + def __rfloordiv__(self: NDArray[floating | integer], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... + @overload + def __rfloordiv__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __pow__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __pow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __pow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __pow__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __pow__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... 
+ @overload + def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... + @overload + def __pow__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __pow__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __lshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... + def __rpow__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rpow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __rpow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __rpow__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __rpow__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... + @overload + def __rpow__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __rpow__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + @overload def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] @overload @@ -2390,8 +3214,6 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]): @overload def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - @overload - def __rlshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload def __rlshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] @overload @@ -2403,8 +3225,6 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]): @overload def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
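A pattern worth noting in the `__pow__`/`__rpow__` ladder above (and in the other binary ops in this hunk): the first overload, `other: int | np.bool`, returns `ndarray[_ShapeT_co, dtype[_NumberT]]`, so applying a bare Python `int` preserves both the array's shape type and its exact dtype; only the later `_ArrayLike*_co` overloads fall back to the abstract `unsignedinteger`/`signedinteger`/`floating`/`complexfloating` families. Roughly, under a type checker (names illustrative):

    import numpy as np
    from typing import reveal_type
    from numpy.typing import NDArray

    x: NDArray[np.int16] = np.array([1, 2], dtype=np.int16)
    b: NDArray[np.bool] = np.array([True, False])

    reveal_type(x ** 2)      # inferred: dtype[int16] and the shape type are preserved
    reveal_type(b ** b)      # inferred: NDArray[np.int8] (bool ** bool gives int8)
    reveal_type(x ** [0.5])  # inferred: NDArray[np.floating] (abstract fallback)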
- @overload - def __rshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload def __rshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] @overload @@ -2416,8 +3236,6 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]): def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - @overload - def __rrshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload def __rrshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] @overload @@ -2429,8 +3247,6 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]): def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - @overload - def __and__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload def __and__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] @overload @@ -2442,8 +3258,6 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]): def __and__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - @overload - def __rand__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload def __rand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] @overload @@ -2455,8 +3269,6 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]): def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - @overload - def __xor__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload def __xor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] @overload @@ -2468,8 +3280,6 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]): def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - @overload - def __rxor__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload def __rxor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] @overload @@ -2481,8 +3291,6 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]): def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - @overload - def __or__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload def __or__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] @overload @@ -2494,8 +3302,6 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]): def __or__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... - @overload - def __ror__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload def __ror__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] @overload @@ -2513,207 +3319,249 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]): # operand. An exception to this rule is unsigned integers, which # also accept a signed integer for the right operand as long as it is a 0D # object and its value is >= 0 + # NOTE: Due to a mypy bug, overloading on e.g.
`self: NDArray[SCT_floating]` won't + # work, as this will lead to `false negatives` when using these inplace ops. @overload - def __iadd__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... + def __iadd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + @overload + def __iadd__( + self: NDArray[unsignedinteger[Any]], + other: _ArrayLikeUInt_co | _IntLike_co, + /, + ) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __iadd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __iadd__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __iadd__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ... + def __iadd__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __iadd__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ... + def __iadd__(self: NDArray[floating[Any]], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __iadd__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co, /) -> NDArray[floating[_NBit1]]: ... + def __iadd__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __iadd__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + def __iadd__(self: NDArray[complexfloating[Any]], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __iadd__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + def __iadd__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __iadd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... + def __iadd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __iadd__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... + def __iadd__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __isub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... + def __isub__( + self: NDArray[unsignedinteger[Any]], + other: _ArrayLikeUInt_co | _IntLike_co, + /, + ) -> ndarray[_ShapeT_co, _DType_co]: ... + @overload + def __isub__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __isub__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ... + def __isub__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __isub__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ... + def __isub__(self: NDArray[floating[Any]], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __isub__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co, /) -> NDArray[floating[_NBit1]]: ... + def __isub__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... 
@overload - def __isub__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + def __isub__(self: NDArray[complexfloating[Any]], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __isub__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + def __isub__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __isub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... + def __isub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __isub__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... + def __isub__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __imul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... + def __imul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + @overload + def __imul__( + self: NDArray[unsignedinteger[Any]], + other: _ArrayLikeUInt_co | _IntLike_co, + /, + ) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __imul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __imul__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __imul__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ... + def __imul__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __imul__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ... + def __imul__(self: NDArray[floating[Any]], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __imul__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co, /) -> NDArray[floating[_NBit1]]: ... + def __imul__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __imul__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + def __imul__(self: NDArray[complexfloating[Any]], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __imul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + def __imul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __imul__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... + def __imul__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __itruediv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... + def __itruediv__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __itruediv__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co, /) -> NDArray[floating[_NBit1]]: ... + def __itruediv__(self: NDArray[floating[Any]], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __itruediv__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... 
+ def __itruediv__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + def __itruediv__( + self: NDArray[complexfloating[Any]], + other: _ArrayLikeComplex_co, + /, + ) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co, /) -> NDArray[timedelta64]: ... + def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeInt, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __itruediv__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... + def __itruediv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ifloordiv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... + def __ifloordiv__( + self: NDArray[unsignedinteger[Any]], + other: _ArrayLikeUInt_co | _IntLike_co, + /, + ) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ifloordiv__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ... + def __ifloordiv__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ifloordiv__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ... + def __ifloordiv__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ifloordiv__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co, /) -> NDArray[floating[_NBit1]]: ... + def __ifloordiv__(self: NDArray[floating[Any]], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ifloordiv__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + def __ifloordiv__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + def __ifloordiv__( + self: NDArray[complexfloating[Any]], + other: _ArrayLikeComplex_co, + /, + ) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co, /) -> NDArray[timedelta64]: ... + def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeInt, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ifloordiv__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... + def __ifloordiv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ipow__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... + def __ipow__( + self: NDArray[unsignedinteger[Any]], + other: _ArrayLikeUInt_co | _IntLike_co, + /, + ) -> ndarray[_ShapeT_co, _DType_co]: ... + @overload + def __ipow__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ipow__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ... + def __ipow__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ipow__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ... 
+ def __ipow__(self: NDArray[floating[Any]], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ipow__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co, /) -> NDArray[floating[_NBit1]]: ... + def __ipow__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ipow__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + def __ipow__(self: NDArray[complexfloating[Any]], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ipow__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... + def __ipow__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __imod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... + def __imod__( + self: NDArray[unsignedinteger[Any]], + other: _ArrayLikeUInt_co | _IntLike_co, + /, + ) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __imod__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ... + def __imod__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __imod__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ... + def __imod__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __imod__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co, /) -> NDArray[floating[_NBit1]]: ... + def __imod__(self: NDArray[floating[Any]], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __imod__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[timedelta64]: ... + def __imod__( + self: NDArray[timedelta64], + other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], + /, + ) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __imod__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... + def __imod__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ilshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... - @overload - def __ilshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ... + def __ilshift__( + self: NDArray[unsignedinteger[Any]], + other: _ArrayLikeUInt_co | _IntLike_co, + /, + ) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ilshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ... + def __ilshift__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ilshift__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... + def __ilshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __irshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... - @overload - def __irshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ... 
+ def __irshift__( + self: NDArray[unsignedinteger[Any]], + other: _ArrayLikeUInt_co | _IntLike_co, + /, + ) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __irshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ... + def __irshift__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __irshift__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... + def __irshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __iand__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... - @overload - def __iand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __iand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __iand__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ... + def __iand__( + self: NDArray[unsignedinteger[Any]], + other: _ArrayLikeUInt_co | _IntLike_co, + /, + ) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __iand__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ... + def __iand__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __iand__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... + def __iand__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ixor__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... - @overload - def __ixor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __ixor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ixor__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ... + def __ixor__( + self: NDArray[unsignedinteger[Any]], + other: _ArrayLikeUInt_co | _IntLike_co, + /, + ) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ixor__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ... + def __ixor__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ixor__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... + def __ixor__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ior__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... - @overload - def __ior__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __ior__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ior__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ... + def __ior__( + self: NDArray[unsignedinteger[Any]], + other: _ArrayLikeUInt_co | _IntLike_co, + /, + ) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ior__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ... + def __ior__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... 
@overload - def __ior__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... + def __ior__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __imatmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... + def __imatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + @overload + def __imatmul__(self: NDArray[unsignedinteger[Any]], other: _ArrayLikeUInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __imatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... + def __imatmul__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __imatmul__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[_NBit1]]: ... + def __imatmul__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __imatmul__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ... + def __imatmul__(self: NDArray[floating[Any]], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __imatmul__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co, /) -> NDArray[floating[_NBit1]]: ... + def __imatmul__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __imatmul__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + def __imatmul__(self: NDArray[complexfloating[Any]], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __imatmul__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... + def __imatmul__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... def __dlpack__( self: NDArray[number[Any]], + /, *, - stream: int | Any | None = ..., - max_version: tuple[int, int] | None = ..., - dl_device: tuple[int, L[0]] | None = ..., - copy: bool | None = ..., - ) -> _PyCapsule: ... - - def __dlpack_device__(self) -> tuple[int, L[0]]: ... - - @overload - def to_device(self: NDArray[_SCT], device: L["cpu"], /, *, stream: None | int | Any = ...) -> NDArray[_SCT]: ... - @overload - def to_device(self: NDArray[Any], device: L["cpu"], /, *, stream: None | int | Any = ...) -> NDArray[Any]: ... - - def bitwise_count( - self, - out: None | NDArray[Any] = ..., - *, - where: _ArrayLikeBool_co = ..., - casting: _CastingKind = ..., - order: _OrderKACF = ..., - dtype: DTypeLike = ..., - subok: builtins.bool = ..., - ) -> NDArray[Any]: ... + stream: int | Any | None = None, + max_version: tuple[int, int] | None = None, + dl_device: tuple[int, int] | None = None, + copy: builtins.bool | None = None, + ) -> CapsuleType: ... + def __dlpack_device__(self, /) -> tuple[L[1], L[0]]: ... # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @property @@ -2724,22 +3572,18 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]): # the creation of `np.generic` instances. # The `# type: ignore` comments are necessary to silence mypy errors regarding # the missing `ABCMeta` metaclass. - # See https://github.com/numpy/numpy-stubs/pull/80 for more details. 
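All of the in-place overloads above now return `ndarray[_ShapeT_co, _DType_co]` instead of a freshly constructed `NDArray[...]`, so `a += b` keeps the exact shape and dtype parameters of `a`. They also spell out the rule from the comment that opens this block: an unsigned-integer array additionally accepts a bare (possibly signed) Python integer on the right, while a signed integer array on the right stays rejected. A sketch of the expected behaviour under a type checker:

    import numpy as np
    from typing import reveal_type

    a = np.zeros((2, 3), dtype=np.uint8)
    a += 1          # accepted: `_IntLike_co` admits a bare Python int for uint arrays
    reveal_type(a)  # inferred: still an ndarray[..., dtype[uint8]]; nothing is widened
    # a += np.arange(3, dtype=np.int64)  # rejected: signed-integer array rhs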
- -_ScalarType = TypeVar("_ScalarType", bound=generic) -_NBit1 = TypeVar("_NBit1", bound=NBitBase) -_NBit2 = TypeVar("_NBit2", bound=NBitBase) - -class generic(_ArrayOrScalarCommon): +class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): @abstractmethod def __init__(self, *args: Any, **kwargs: Any) -> None: ... - # TODO: use `tuple[()]` as shape type once covariant (#26081) + def __hash__(self) -> int: ... @overload - def __array__(self: _ScalarType, dtype: None = ..., /) -> NDArray[_ScalarType]: ... + def __array__(self, dtype: None = None, /) -> ndarray[tuple[()], dtype[Self]]: ... @overload - def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ... - def __hash__(self) -> int: ... + def __array__(self, dtype: _DType, /) -> ndarray[tuple[()], _DType]: ... + if sys.version_info >= (3, 12): + def __buffer__(self, flags: int, /) -> memoryview: ... + @property def base(self) -> None: ... @property @@ -2750,24 +3594,26 @@ class generic(_ArrayOrScalarCommon): def shape(self) -> tuple[()]: ... @property def strides(self) -> tuple[()]: ... - def byteswap(self: _ScalarType, inplace: L[False] = ...) -> _ScalarType: ... @property - def flat(self: _ScalarType) -> flatiter[NDArray[_ScalarType]]: ... + def flat(self) -> flatiter[ndarray[tuple[int], dtype[Self]]]: ... - if sys.version_info >= (3, 12): - def __buffer__(self, flags: int, /) -> memoryview: ... + @overload + def item(self, /) -> _ItemT_co: ... + @overload + def item(self, arg0: L[0, -1] | tuple[L[0, -1]] | tuple[()] = ..., /) -> _ItemT_co: ... + def tolist(self, /) -> _ItemT_co: ... - def to_device(self: _ScalarType, device: L["cpu"], /, *, stream: None | int | Any = ...) -> _ScalarType: ... + def byteswap(self, inplace: L[False] = ...) -> Self: ... @overload def astype( self, - dtype: _DTypeLike[_ScalarType], + dtype: _DTypeLike[_SCT], order: _OrderKACF = ..., casting: _CastingKind = ..., subok: builtins.bool = ..., copy: builtins.bool | _CopyMode = ..., - ) -> _ScalarType: ... + ) -> _SCT: ... @overload def astype( self, @@ -2781,16 +3627,13 @@ class generic(_ArrayOrScalarCommon): # NOTE: `view` will perform a 0D->scalar cast, # thus the array `type` is irrelevant to the output type @overload - def view( - self: _ScalarType, - type: type[NDArray[Any]] = ..., - ) -> _ScalarType: ... + def view(self, type: type[NDArray[Any]] = ...) -> Self: ... @overload def view( self, - dtype: _DTypeLike[_ScalarType], + dtype: _DTypeLike[_SCT], type: type[NDArray[Any]] = ..., - ) -> _ScalarType: ... + ) -> _SCT: ... @overload def view( self, @@ -2801,9 +3644,9 @@ class generic(_ArrayOrScalarCommon): @overload def getfield( self, - dtype: _DTypeLike[_ScalarType], + dtype: _DTypeLike[_SCT], offset: SupportsIndex = ... - ) -> _ScalarType: ... + ) -> _SCT: ... @overload def getfield( self, @@ -2811,92 +3654,196 @@ class generic(_ArrayOrScalarCommon): offset: SupportsIndex = ... ) -> Any: ... - def item( - self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, - ) -> Any: ... - @overload def take( # type: ignore[misc] - self: _ScalarType, + self, indices: _IntLike_co, axis: None | SupportsIndex = ..., out: None = ..., mode: _ModeKind = ..., - ) -> _ScalarType: ... + ) -> Self: ... @overload def take( # type: ignore[misc] - self: _ScalarType, + self, indices: _ArrayLikeInt_co, axis: None | SupportsIndex = ..., out: None = ..., mode: _ModeKind = ..., - ) -> NDArray[_ScalarType]: ... + ) -> NDArray[Self]: ... 
@overload def take( self, indices: _ArrayLikeInt_co, axis: None | SupportsIndex = ..., - out: _NdArraySubClass = ..., + out: _ArrayT = ..., mode: _ModeKind = ..., - ) -> _NdArraySubClass: ... + ) -> _ArrayT: ... - def repeat( - self: _ScalarType, - repeats: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - ) -> NDArray[_ScalarType]: ... + def repeat(self, repeats: _ArrayLikeInt_co, axis: None | SupportsIndex = ...) -> NDArray[Self]: ... + def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... + def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... - def flatten( - self: _ScalarType, - order: _OrderKACF = ..., - ) -> NDArray[_ScalarType]: ... + @overload # (() | []) + def reshape( + self, + shape: tuple[()] | list[Never], + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> Self: ... + @overload # ((1, *(1, ...))@_ShapeType) + def reshape( + self, + shape: _1NShapeT, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[_1NShapeT, dtype[Self]]: ... + @overload # (Sequence[index, ...]) # not recommended + def reshape( + self, + shape: Sequence[SupportsIndex], + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> Self | ndarray[tuple[L[1], ...], dtype[Self]]: ... + @overload # _(index) + def reshape( + self, + size1: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[L[1]], dtype[Self]]: ... + @overload # _(index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[L[1], L[1]], dtype[Self]]: ... + @overload # _(index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[L[1], L[1], L[1]], dtype[Self]]: ... + @overload # _(index, index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + size4: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[L[1], L[1], L[1], L[1]], dtype[Self]]: ... + @overload # _(index, index, index, index, index, *index) # ndim >= 5 + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + size4: SupportsIndex, + size5: SupportsIndex, + /, + *sizes6_: SupportsIndex, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[L[1], L[1], L[1], L[1], L[1], Unpack[tuple[L[1], ...]]], dtype[Self]]: ... - def ravel( - self: _ScalarType, - order: _OrderKACF = ..., - ) -> NDArray[_ScalarType]: ... + def squeeze(self, axis: None | L[0] | tuple[()] = ...) -> Self: ... + def transpose(self, axes: None | tuple[()] = ..., /) -> Self: ... @overload - def reshape( - self: _ScalarType, shape: _ShapeLike, /, *, order: _OrderACF = ... - ) -> NDArray[_ScalarType]: ... + def all( + self, + /, + axis: L[0, -1] | tuple[()] | None = None, + out: None = None, + keepdims: SupportsIndex = False, + *, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True + ) -> np.bool: ... @overload - def reshape( - self: _ScalarType, *shape: SupportsIndex, order: _OrderACF = ... - ) -> NDArray[_ScalarType]: ... 
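The scalar `reshape` overloads above encode that every axis of the result must have length one, so the static shape is a tuple of `Literal[1]`s. A sketch of the intended static types (not part of the patch):

import numpy as np

s = np.float64(3.0)
s.reshape(())    # the tuple[()] overload: typed as the scalar itself (Self)
s.reshape(1)     # ndarray[tuple[Literal[1]], dtype[float64]]
s.reshape(1, 1)  # ndarray[tuple[Literal[1], Literal[1]], dtype[float64]]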
+ def all( + self, + /, + axis: L[0, -1] | tuple[()] | None, + out: ndarray[tuple[()], dtype[_SCT]], + keepdims: SupportsIndex = False, + *, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + ) -> _SCT: ... + @overload + def all( + self, + /, + axis: L[0, -1] | tuple[()] | None = None, + *, + out: ndarray[tuple[()], dtype[_SCT]], + keepdims: SupportsIndex = False, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + ) -> _SCT: ... - def bitwise_count( + @overload + def any( self, - out: None | NDArray[Any] = ..., + /, + axis: L[0, -1] | tuple[()] | None = None, + out: None = None, + keepdims: SupportsIndex = False, *, - where: _ArrayLikeBool_co = ..., - casting: _CastingKind = ..., - order: _OrderKACF = ..., - dtype: DTypeLike = ..., - subok: builtins.bool = ..., - ) -> Any: ... + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True + ) -> np.bool: ... + @overload + def any( + self, + /, + axis: L[0, -1] | tuple[()] | None, + out: ndarray[tuple[()], dtype[_SCT]], + keepdims: SupportsIndex = False, + *, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + ) -> _SCT: ... + @overload + def any( + self, + /, + axis: L[0, -1] | tuple[()] | None = None, + *, + out: ndarray[tuple[()], dtype[_SCT]], + keepdims: SupportsIndex = False, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + ) -> _SCT: ... - def squeeze( - self: _ScalarType, axis: None | L[0] | tuple[()] = ... - ) -> _ScalarType: ... - def transpose(self: _ScalarType, axes: None | tuple[()] = ..., /) -> _ScalarType: ... # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` @property - def dtype(self: _ScalarType) -> _dtype[_ScalarType]: ... + def dtype(self) -> _dtype[Self]: ... -class number(generic, Generic[_NBit1]): # type: ignore - @property - def real(self: _ArraySelf) -> _ArraySelf: ... - @property - def imag(self: _ArraySelf) -> _ArraySelf: ... +class number(generic[_NumberItemT_co], Generic[_NBit, _NumberItemT_co]): + @abstractmethod + def __init__(self, value: _NumberItemT_co, /) -> None: ... def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... - def __int__(self) -> int: ... - def __float__(self) -> float: ... - def __complex__(self) -> complex: ... - def __neg__(self: _ArraySelf) -> _ArraySelf: ... - def __pos__(self: _ArraySelf) -> _ArraySelf: ... - def __abs__(self: _ArraySelf) -> _ArraySelf: ... - # Ensure that objects annotated as `number` support arithmetic operations + + def __neg__(self) -> Self: ... + def __pos__(self) -> Self: ... + def __abs__(self) -> Self: ... + __add__: _NumberOp __radd__: _NumberOp __sub__: _NumberOp @@ -2909,149 +3856,158 @@ class number(generic, Generic[_NBit1]): # type: ignore __rpow__: _NumberOp __truediv__: _NumberOp __rtruediv__: _NumberOp + __lt__: _ComparisonOpLT[_NumberLike_co, _ArrayLikeNumber_co] __le__: _ComparisonOpLE[_NumberLike_co, _ArrayLikeNumber_co] __gt__: _ComparisonOpGT[_NumberLike_co, _ArrayLikeNumber_co] __ge__: _ComparisonOpGE[_NumberLike_co, _ArrayLikeNumber_co] -class bool(generic): - def __init__(self, value: object = ..., /) -> None: ... - def item( - self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, - ) -> builtins.bool: ... - def tolist(self) -> builtins.bool: ... - @property - def real(self: _ArraySelf) -> _ArraySelf: ... - @property - def imag(self: _ArraySelf) -> _ArraySelf: ... - def __int__(self) -> int: ... - def __float__(self) -> float: ... - def __complex__(self) -> complex: ... 
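Per the scalar `all`/`any` overloads above, a 0-d scalar only accepts the degenerate axes (`0`, `-1`, `()`, or `None`) and reduces to a single `np.bool`. Quick illustration (not part of the patch):

import numpy as np

np.float64(0.0).all()  # np.False_, statically np.bool
np.int64(3).any()      # np.True_, statically np.bool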
- def __abs__(self: _ArraySelf) -> _ArraySelf: ... +class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): + @property + def itemsize(self) -> L[1]: ... + @property + def nbytes(self) -> L[1]: ... + @property + def real(self) -> Self: ... + @property + def imag(self) -> np.bool[L[False]]: ... + + @overload + def __init__(self: np.bool[L[False]], /) -> None: ... + @overload + def __init__(self: np.bool[L[False]], value: _Falsy = ..., /) -> None: ... + @overload + def __init__(self: np.bool[L[True]], value: _Truthy, /) -> None: ... + @overload + def __init__(self, value: object, /) -> None: ... + + def __bool__(self, /) -> _BoolItemT_co: ... + @overload + def __int__(self: np.bool[L[False]], /) -> L[0]: ... + @overload + def __int__(self: np.bool[L[True]], /) -> L[1]: ... + @overload + def __int__(self, /) -> L[0, 1]: ... + @deprecated("In future, it will be an error for 'np.bool' scalars to be interpreted as an index") + def __index__(self, /) -> L[0, 1]: ... + def __abs__(self) -> Self: ... + + @overload + def __invert__(self: np.bool[L[False]], /) -> np.bool[L[True]]: ... + @overload + def __invert__(self: np.bool[L[True]], /) -> np.bool[L[False]]: ... + @overload + def __invert__(self, /) -> np.bool: ... + __add__: _BoolOp[np.bool] __radd__: _BoolOp[np.bool] __sub__: _BoolSub __rsub__: _BoolSub __mul__: _BoolOp[np.bool] __rmul__: _BoolOp[np.bool] + __truediv__: _BoolTrueDiv + __rtruediv__: _BoolTrueDiv __floordiv__: _BoolOp[int8] __rfloordiv__: _BoolOp[int8] __pow__: _BoolOp[int8] __rpow__: _BoolOp[int8] - __truediv__: _BoolTrueDiv - __rtruediv__: _BoolTrueDiv - def __invert__(self) -> np.bool: ... + __lshift__: _BoolBitOp[int8] __rlshift__: _BoolBitOp[int8] __rshift__: _BoolBitOp[int8] __rrshift__: _BoolBitOp[int8] - __and__: _BoolBitOp[np.bool] - __rand__: _BoolBitOp[np.bool] - __xor__: _BoolBitOp[np.bool] - __rxor__: _BoolBitOp[np.bool] - __or__: _BoolBitOp[np.bool] - __ror__: _BoolBitOp[np.bool] + + @overload + def __and__(self: np.bool[L[False]], other: builtins.bool | np.bool, /) -> np.bool[L[False]]: ... + @overload + def __and__(self, other: L[False] | np.bool[L[False]], /) -> np.bool[L[False]]: ... + @overload + def __and__(self, other: L[True] | np.bool[L[True]], /) -> Self: ... + @overload + def __and__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + @overload + def __and__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __and__(self, other: int, /) -> np.bool | intp: ... + __rand__ = __and__ + + @overload + def __xor__(self: np.bool[L[False]], other: _BoolItemT | np.bool[_BoolItemT], /) -> np.bool[_BoolItemT]: ... + @overload + def __xor__(self: np.bool[L[True]], other: L[True] | np.bool[L[True]], /) -> np.bool[L[False]]: ... + @overload + def __xor__(self, other: L[False] | np.bool[L[False]], /) -> Self: ... + @overload + def __xor__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + @overload + def __xor__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __xor__(self, other: int, /) -> np.bool | intp: ... + __rxor__ = __xor__ + + @overload + def __or__(self: np.bool[L[True]], other: builtins.bool | np.bool, /) -> np.bool[L[True]]: ... + @overload + def __or__(self, other: L[False] | np.bool[L[False]], /) -> Self: ... + @overload + def __or__(self, other: L[True] | np.bool[L[True]], /) -> np.bool[L[True]]: ... + @overload + def __or__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + @overload + def __or__(self, other: _IntegerT, /) -> _IntegerT: ... 
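`np.bool` is now generic over its literal value, which lets `~` and the bitwise operators propagate known truth values, and lets `bool & integer` resolve to the integer type via the `_IntegerT` overloads that follow. Sketch (assumes NumPy >= 2.0, where `np.bool` is the scalar type; not part of the patch):

import numpy as np

t = np.bool(True)           # statically np.bool[Literal[True]]
f = ~t                      # np.bool[Literal[False]] per the __invert__ overloads
np.bool(True) & np.int8(3)  # np.int8(1): the integer operand determines the type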
+    @overload
+    def __or__(self, other: int, /) -> np.bool | intp: ...
+    __ror__ = __or__
+
     __mod__: _BoolMod
     __rmod__: _BoolMod
     __divmod__: _BoolDivMod
     __rdivmod__: _BoolDivMod
+
     __lt__: _ComparisonOpLT[_NumberLike_co, _ArrayLikeNumber_co]
     __le__: _ComparisonOpLE[_NumberLike_co, _ArrayLikeNumber_co]
     __gt__: _ComparisonOpGT[_NumberLike_co, _ArrayLikeNumber_co]
     __ge__: _ComparisonOpGE[_NumberLike_co, _ArrayLikeNumber_co]
 
-bool_: TypeAlias = bool
+# NOTE: This should _not_ be `Final` or a `TypeAlias`
+bool_ = bool
 
+# NOTE: The `object_` constructor returns the passed object, so instances with type
+# `object_` cannot exist (at runtime).
+# NOTE: Because mypy has some long-standing bugs related to `__new__`, `object_` can't
+# be made generic.
 @final
-class object_(generic):
+class object_(_RealMixin, generic):
+    @overload
+    def __new__(cls, nothing_to_see_here: None = None, /) -> None: ...  # type: ignore[misc]
+    @overload
+    def __new__(cls, stringy: _AnyStr, /) -> _AnyStr: ...  # type: ignore[misc]
+    @overload
+    def __new__(cls, array: ndarray[_ShapeT, Any], /) -> ndarray[_ShapeT, dtype[Self]]: ...  # type: ignore[misc]
+    @overload
+    def __new__(cls, sequence: SupportsLenAndGetItem[object], /) -> NDArray[Self]: ...  # type: ignore[misc]
+    @overload
+    def __new__(cls, value: _T, /) -> _T: ...  # type: ignore[misc]
+    @overload  # catch-all
+    def __new__(cls, value: Any = ..., /) -> object | NDArray[Self]: ...  # type: ignore[misc]
     def __init__(self, value: object = ..., /) -> None: ...
-    @property
-    def real(self: _ArraySelf) -> _ArraySelf: ...
-    @property
-    def imag(self: _ArraySelf) -> _ArraySelf: ...
-    # The 3 protocols below may or may not raise,
-    # depending on the underlying object
-    def __int__(self) -> int: ...
-    def __float__(self) -> float: ...
-    def __complex__(self) -> complex: ...
 
     if sys.version_info >= (3, 12):
         def __release_buffer__(self, buffer: memoryview, /) -> None: ...
 
-# The `datetime64` constructors requires an object with the three attributes below,
-# and thus supports datetime duck typing
-class _DatetimeScalar(Protocol):
-    @property
-    def day(self) -> int: ...
-    @property
-    def month(self) -> int: ...
-    @property
-    def year(self) -> int: ...
+class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]):
+    @abstractmethod
+    def __init__(self, value: _ConvertibleToInt = ..., /) -> None: ...
 
-# TODO: `item`/`tolist` returns either `dt.date`, `dt.datetime` or `int`
-# depending on the unit
-class datetime64(generic):
-    @overload
-    def __init__(
-        self,
-        value: None | datetime64 | _CharLike_co | _DatetimeScalar = ...,
-        format: _CharLike_co | tuple[_CharLike_co, _IntLike_co] = ...,
-        /,
-    ) -> None: ...
-    @overload
-    def __init__(
-        self,
-        value: int,
-        format: _CharLike_co | tuple[_CharLike_co, _IntLike_co],
-        /,
-    ) -> None: ...
-    def __add__(self, other: _TD64Like_co, /) -> datetime64: ...
-    def __radd__(self, other: _TD64Like_co, /) -> datetime64: ...
-    @overload
-    def __sub__(self, other: datetime64, /) -> timedelta64: ...
-    @overload
-    def __sub__(self, other: _TD64Like_co, /) -> datetime64: ...
-    def __rsub__(self, other: datetime64, /) -> timedelta64: ...
-    __lt__: _ComparisonOpLT[datetime64, _ArrayLikeDT64_co]
-    __le__: _ComparisonOpLE[datetime64, _ArrayLikeDT64_co]
-    __gt__: _ComparisonOpGT[datetime64, _ArrayLikeDT64_co]
-    __ge__: _ComparisonOpGE[datetime64, _ArrayLikeDT64_co]
+    # NOTE: `bit_count` and `__index__` are technically defined in the concrete subtypes
+    def bit_count(self, /) -> int: ...
+    def __index__(self, /) -> int: ...
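As the NOTE above says, the `object_` constructor hands back the argument itself, or wraps a sequence in an object-dtype array, and the `__new__` overloads now reflect that. Illustration (not part of the patch):

import numpy as np

s = np.object_("hello")   # the str itself is returned, typed via _AnyStr
a = np.object_([1, "a"])  # sequences become object-dtype arrays
print(type(s), type(a))   # <class 'str'> <class 'numpy.ndarray'>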
+ def __invert__(self, /) -> Self: ... -_IntValue: TypeAlias = SupportsInt | _CharLike_co | SupportsIndex -_FloatValue: TypeAlias = None | _CharLike_co | SupportsFloat | SupportsIndex -_ComplexValue: TypeAlias = ( - None - | _CharLike_co - | SupportsFloat - | SupportsComplex - | SupportsIndex - | complex # `complex` is not a subtype of `SupportsComplex` -) - -class integer(number[_NBit1]): # type: ignore - @property - def numerator(self: _ScalarType) -> _ScalarType: ... - @property - def denominator(self) -> L[1]: ... - @overload - def __round__(self, ndigits: None = ..., /) -> int: ... - @overload - def __round__(self: _ScalarType, ndigits: SupportsIndex, /) -> _ScalarType: ... - - # NOTE: `__index__` is technically defined in the bottom-most - # sub-classes (`int64`, `uint32`, etc) - def item( - self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, - ) -> int: ... - def tolist(self) -> int: ... - def is_integer(self) -> L[True]: ... - def bit_count(self: _ScalarType) -> int: ... - def __index__(self) -> int: ... - __truediv__: _IntTrueDiv[_NBit1] - __rtruediv__: _IntTrueDiv[_NBit1] + __truediv__: _IntTrueDiv[_NBit] + __rtruediv__: _IntTrueDiv[_NBit] def __mod__(self, value: _IntLike_co, /) -> integer[Any]: ... def __rmod__(self, value: _IntLike_co, /) -> integer[Any]: ... - def __invert__(self: _IntType) -> _IntType: ... # Ensure that objects annotated as `integer` support bit-wise operations def __lshift__(self, other: _IntLike_co, /) -> integer[Any]: ... def __rlshift__(self, other: _IntLike_co, /) -> integer[Any]: ... @@ -3065,7 +4021,8 @@ class integer(number[_NBit1]): # type: ignore def __rxor__(self, other: _IntLike_co, /) -> integer[Any]: ... class signedinteger(integer[_NBit1]): - def __init__(self, value: _IntValue = ..., /) -> None: ... + def __init__(self, value: _ConvertibleToInt = ..., /) -> None: ... + __add__: _SignedIntOp[_NBit1] __radd__: _SignedIntOp[_NBit1] __sub__: _SignedIntOp[_NBit1] @@ -3091,264 +4048,712 @@ class signedinteger(integer[_NBit1]): __divmod__: _SignedIntDivMod[_NBit1] __rdivmod__: _SignedIntDivMod[_NBit1] -int8 = signedinteger[_8Bit] -int16 = signedinteger[_16Bit] -int32 = signedinteger[_32Bit] -int64 = signedinteger[_64Bit] +int8 = signedinteger[_8Bit] +int16 = signedinteger[_16Bit] +int32 = signedinteger[_32Bit] +int64 = signedinteger[_64Bit] + +byte = signedinteger[_NBitByte] +short = signedinteger[_NBitShort] +intc = signedinteger[_NBitIntC] +intp = signedinteger[_NBitIntP] +int_ = intp +long = signedinteger[_NBitLong] +longlong = signedinteger[_NBitLongLong] + +class unsignedinteger(integer[_NBit1]): + # NOTE: `uint64 + signedinteger -> float64` + def __init__(self, value: _ConvertibleToInt = ..., /) -> None: ... 
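The `uint64 + signedinteger -> float64` NOTE above reflects actual promotion: no fixed-width integer type holds both value ranges, so the result falls back to `float64`. For example (not part of the patch):

import numpy as np

x = np.uint64(2) + np.int64(3)  # np.float64(5.0)
print(type(x))                  # <class 'numpy.float64'>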
+ + __add__: _UnsignedIntOp[_NBit1] + __radd__: _UnsignedIntOp[_NBit1] + __sub__: _UnsignedIntOp[_NBit1] + __rsub__: _UnsignedIntOp[_NBit1] + __mul__: _UnsignedIntOp[_NBit1] + __rmul__: _UnsignedIntOp[_NBit1] + __floordiv__: _UnsignedIntOp[_NBit1] + __rfloordiv__: _UnsignedIntOp[_NBit1] + __pow__: _UnsignedIntOp[_NBit1] + __rpow__: _UnsignedIntOp[_NBit1] + __lshift__: _UnsignedIntBitOp[_NBit1] + __rlshift__: _UnsignedIntBitOp[_NBit1] + __rshift__: _UnsignedIntBitOp[_NBit1] + __rrshift__: _UnsignedIntBitOp[_NBit1] + __and__: _UnsignedIntBitOp[_NBit1] + __rand__: _UnsignedIntBitOp[_NBit1] + __xor__: _UnsignedIntBitOp[_NBit1] + __rxor__: _UnsignedIntBitOp[_NBit1] + __or__: _UnsignedIntBitOp[_NBit1] + __ror__: _UnsignedIntBitOp[_NBit1] + __mod__: _UnsignedIntMod[_NBit1] + __rmod__: _UnsignedIntMod[_NBit1] + __divmod__: _UnsignedIntDivMod[_NBit1] + __rdivmod__: _UnsignedIntDivMod[_NBit1] + +uint8: TypeAlias = unsignedinteger[_8Bit] +uint16: TypeAlias = unsignedinteger[_16Bit] +uint32: TypeAlias = unsignedinteger[_32Bit] +uint64: TypeAlias = unsignedinteger[_64Bit] + +ubyte: TypeAlias = unsignedinteger[_NBitByte] +ushort: TypeAlias = unsignedinteger[_NBitShort] +uintc: TypeAlias = unsignedinteger[_NBitIntC] +uintp: TypeAlias = unsignedinteger[_NBitIntP] +uint: TypeAlias = uintp +ulong: TypeAlias = unsignedinteger[_NBitLong] +ulonglong: TypeAlias = unsignedinteger[_NBitLongLong] + +class inexact(number[_NBit, _InexactItemT_co], Generic[_NBit, _InexactItemT_co]): + @abstractmethod + def __init__(self, value: _InexactItemT_co | None = ..., /) -> None: ... + +class floating(_RealMixin, _RoundMixin, inexact[_NBit1, float]): + def __init__(self, value: _ConvertibleToFloat | None = ..., /) -> None: ... + + __add__: _FloatOp[_NBit1] + __radd__: _FloatOp[_NBit1] + __sub__: _FloatOp[_NBit1] + __rsub__: _FloatOp[_NBit1] + __mul__: _FloatOp[_NBit1] + __rmul__: _FloatOp[_NBit1] + __truediv__: _FloatOp[_NBit1] + __rtruediv__: _FloatOp[_NBit1] + __floordiv__: _FloatOp[_NBit1] + __rfloordiv__: _FloatOp[_NBit1] + __pow__: _FloatOp[_NBit1] + __rpow__: _FloatOp[_NBit1] + __mod__: _FloatMod[_NBit1] + __rmod__: _FloatMod[_NBit1] + __divmod__: _FloatDivMod[_NBit1] + __rdivmod__: _FloatDivMod[_NBit1] + + # NOTE: `is_integer` and `as_integer_ratio` are technically defined in the concrete subtypes + def is_integer(self, /) -> builtins.bool: ... + def as_integer_ratio(self, /) -> tuple[int, int]: ... + +float16: TypeAlias = floating[_16Bit] +float32: TypeAlias = floating[_32Bit] + +# either a C `double`, `float`, or `longdouble` +class float64(floating[_64Bit], float): # type: ignore[misc] + def __new__(cls, x: _ConvertibleToFloat | None = ..., /) -> Self: ... + + # + @property + def itemsize(self) -> L[8]: ... + @property + def nbytes(self) -> L[8]: ... + + # overrides for `floating` and `builtins.float` compatibility (`_RealMixin` doesn't work) + @property + def real(self) -> Self: ... + @property + def imag(self) -> Self: ... + def conjugate(self) -> Self: ... + def __getformat__(self, typestr: L["double", "float"], /) -> str: ... + def __getnewargs__(self, /) -> tuple[float]: ... + + # float64-specific operator overrides + @overload + def __add__(self, other: _Float64_co, /) -> float64: ... + @overload + def __add__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __add__(self, other: complex, /) -> float64 | complex128: ... 
+ @overload + def __radd__(self, other: _Float64_co, /) -> float64: ... + @overload + def __radd__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __radd__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __radd__(self, other: complex, /) -> float64 | complex128: ... + + @overload + def __sub__(self, other: _Float64_co, /) -> float64: ... + @overload + def __sub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __sub__(self, other: complex, /) -> float64 | complex128: ... + @overload + def __rsub__(self, other: _Float64_co, /) -> float64: ... + @overload + def __rsub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __rsub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __rsub__(self, other: complex, /) -> float64 | complex128: ... + + @overload + def __mul__(self, other: _Float64_co, /) -> float64: ... + @overload + def __mul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __mul__(self, other: complex, /) -> float64 | complex128: ... + @overload + def __rmul__(self, other: _Float64_co, /) -> float64: ... + @overload + def __rmul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __rmul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __rmul__(self, other: complex, /) -> float64 | complex128: ... + + @overload + def __truediv__(self, other: _Float64_co, /) -> float64: ... + @overload + def __truediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __truediv__(self, other: complex, /) -> float64 | complex128: ... + @overload + def __rtruediv__(self, other: _Float64_co, /) -> float64: ... + @overload + def __rtruediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __rtruediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __rtruediv__(self, other: complex, /) -> float64 | complex128: ... + + @overload + def __floordiv__(self, other: _Float64_co, /) -> float64: ... + @overload + def __floordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __floordiv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __floordiv__(self, other: complex, /) -> float64 | complex128: ... + @overload + def __rfloordiv__(self, other: _Float64_co, /) -> float64: ... + @overload + def __rfloordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __rfloordiv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __rfloordiv__(self, other: complex, /) -> float64 | complex128: ... 
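The `float64`-specific overloads above keep `float64 + real -> float64` while promoting mixed complex operands to `complex128` (or a wider `complexfloating`). Illustration (not part of the patch):

import numpy as np

a = np.float64(1.0) + 1   # np.float64(2.0): _Float64_co covers builtin int/float
b = np.float64(1.0) + 1j  # np.complex128(1+1j) via the `complex` overload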
+
+    @overload
+    def __pow__(self, other: _Float64_co, /) -> float64: ...
+    @overload
+    def __pow__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ...
+    @overload
+    def __pow__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ...
+    @overload
+    def __pow__(self, other: complex, /) -> float64 | complex128: ...
+    @overload
+    def __rpow__(self, other: _Float64_co, /) -> float64: ...
+    @overload
+    def __rpow__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ...
+    @overload
+    def __rpow__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ...
+    @overload
+    def __rpow__(self, other: complex, /) -> float64 | complex128: ...
+
+    def __mod__(self, other: _Float64_co, /) -> float64: ...  # type: ignore[override]
+    def __rmod__(self, other: _Float64_co, /) -> float64: ...  # type: ignore[override]
+
+    def __divmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ...  # type: ignore[override]
+    def __rdivmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ...  # type: ignore[override]
+
+
+half: TypeAlias = floating[_NBitHalf]
+single: TypeAlias = floating[_NBitSingle]
+double: TypeAlias = floating[_NBitDouble]
+longdouble: TypeAlias = floating[_NBitLongDouble]
+
+# The main reason for `complexfloating` having two typevars is cosmetic.
+# It is used to clarify why `complex128`'s precision is `_64Bit`, the latter
+# describing the two 64-bit floats representing its real and imaginary components.
+
+class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]):
+    @overload
+    def __init__(
+        self,
+        real: complex | SupportsComplex | SupportsFloat | SupportsIndex = ...,
+        imag: complex | SupportsFloat | SupportsIndex = ...,
+        /,
+    ) -> None: ...
+    @overload
+    def __init__(self, real: _ConvertibleToComplex | None = ..., /) -> None: ...
+
+    @property
+    def real(self) -> floating[_NBit1]: ...  # type: ignore[override]
+    @property
+    def imag(self) -> floating[_NBit2]: ...  # type: ignore[override]
+
+    # NOTE: `__complex__` is technically defined in the concrete subtypes
+    def __complex__(self, /) -> complex: ...
+    def __abs__(self, /) -> floating[_NBit1 | _NBit2]: ...  # type: ignore[override]
+    @deprecated(
+        "The Python built-in `round` is deprecated for complex scalars, and will raise a `TypeError` in a future release. "
+        "Use `np.round` or `scalar.round` instead."
+    )
+    def __round__(self, /, ndigits: SupportsIndex | None = None) -> Self: ...
+
+    @overload
+    def __add__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __add__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __add__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+    @overload
+    def __radd__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __radd__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __radd__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+
+    @overload
+    def __sub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __sub__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+ @overload + def __sub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + @overload + def __rsub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + @overload + def __rsub__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + @overload + def __rsub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + + @overload + def __mul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + @overload + def __mul__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + @overload + def __mul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + @overload + def __rmul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + @overload + def __rmul__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + @overload + def __rmul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + + @overload + def __truediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + @overload + def __truediv__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + @overload + def __truediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + @overload + def __rtruediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + @overload + def __rtruediv__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + @overload + def __rtruediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + + @overload + def __pow__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + @overload + def __pow__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + @overload + def __pow__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + @overload + def __rpow__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + @overload + def __rpow__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + @overload + def __rpow__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + +complex64: TypeAlias = complexfloating[_32Bit, _32Bit] + +class complex128(complexfloating[_64Bit, _64Bit], complex): # type: ignore[misc] + @overload + def __new__( + cls, + real: complex | SupportsComplex | SupportsFloat | SupportsIndex = ..., + imag: complex | SupportsFloat | SupportsIndex = ..., + /, + ) -> Self: ... + @overload + def __new__(cls, real: _ConvertibleToComplex | None = ..., /) -> Self: ... + + # + @property + def itemsize(self) -> L[16]: ... + @property + def nbytes(self) -> L[16]: ... + + # overrides for `floating` and `builtins.float` compatibility + @property + def real(self) -> float64: ... + @property + def imag(self) -> float64: ... + def conjugate(self) -> Self: ... + def __abs__(self) -> float64: ... # type: ignore[override] + def __getnewargs__(self, /) -> tuple[float, float]: ... + + # complex128-specific operator overrides + @overload + def __add__(self, other: _Complex128_co, /) -> complex128: ... 
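The two typevars make the component precisions visible: `.real`/`.imag` of a `complexfloating[_NBit1, _NBit2]` are `floating` of the matching width, and `complex128.__abs__` returns `float64`. Example (not part of the patch):

import numpy as np

z = np.complex64(3 - 4j)
z.real                      # np.float32: floating[_32Bit]
abs(np.complex128(3 - 4j))  # np.float64(5.0)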
+ @overload + def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __radd__(self, other: _Complex128_co, /) -> complex128: ... + + @overload + def __sub__(self, other: _Complex128_co, /) -> complex128: ... + @overload + def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rsub__(self, other: _Complex128_co, /) -> complex128: ... + + @overload + def __mul__(self, other: _Complex128_co, /) -> complex128: ... + @overload + def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rmul__(self, other: _Complex128_co, /) -> complex128: ... + + @overload + def __truediv__(self, other: _Complex128_co, /) -> complex128: ... + @overload + def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rtruediv__(self, other: _Complex128_co, /) -> complex128: ... + + @overload + def __pow__(self, other: _Complex128_co, /) -> complex128: ... + @overload + def __pow__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rpow__(self, other: _Complex128_co, /) -> complex128: ... + +csingle: TypeAlias = complexfloating[_NBitSingle, _NBitSingle] +cdouble: TypeAlias = complexfloating[_NBitDouble, _NBitDouble] +clongdouble: TypeAlias = complexfloating[_NBitLongDouble, _NBitLongDouble] + +class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co]): + @property + def itemsize(self) -> L[8]: ... + @property + def nbytes(self) -> L[8]: ... + + @overload + def __init__(self, value: _TD64ItemT_co | timedelta64[_TD64ItemT_co], /) -> None: ... + @overload + def __init__(self: timedelta64[L[0]], /) -> None: ... + @overload + def __init__(self: timedelta64[None], value: _NaTValue | None, format: _TimeUnitSpec, /) -> None: ... + @overload + def __init__(self: timedelta64[L[0]], value: L[0], format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> None: ... + @overload + def __init__(self: timedelta64[int], value: _IntLike_co, format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> None: ... + @overload + def __init__(self: timedelta64[int], value: dt.timedelta, format: _TimeUnitSpec[_IntTimeUnit], /) -> None: ... + @overload + def __init__( + self: timedelta64[dt.timedelta], + value: dt.timedelta | _IntLike_co, + format: _TimeUnitSpec[_NativeTD64Unit] = ..., + /, + ) -> None: ... + @overload + def __init__(self, value: _ConvertibleToTD64, format: _TimeUnitSpec = ..., /) -> None: ... + + # NOTE: Only a limited number of units support conversion + # to builtin scalar types: `Y`, `M`, `ns`, `ps`, `fs`, `as` + def __int__(self: timedelta64[int], /) -> int: ... + def __float__(self: timedelta64[int], /) -> float: ... + + def __neg__(self, /) -> Self: ... + def __pos__(self, /) -> Self: ... + def __abs__(self, /) -> Self: ... + + @overload + def __add__(self: timedelta64[None], x: _TD64Like_co, /) -> timedelta64[None]: ... + @overload + def __add__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... + @overload + def __add__(self: timedelta64[int], x: timedelta64, /) -> timedelta64[int | None]: ... + @overload + def __add__(self: timedelta64[dt.timedelta], x: _AnyDateOrTime, /) -> _AnyDateOrTime: ... + @overload + def __add__(self: timedelta64[_AnyTD64Item], x: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... 
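`timedelta64` is now generic over its `item()` type: units with an exact `datetime.timedelta` equivalent unwrap to it, while `Y`/`M` and the sub-microsecond units (`ns`, `ps`, `fs`, `as`) stay `int`. Illustration (not part of the patch):

import numpy as np

d = np.timedelta64(1, "D")   # statically timedelta64[datetime.timedelta]
n = np.timedelta64(5, "ns")  # statically timedelta64[int]
d.item()                     # datetime.timedelta(days=1)
n.item()                     # 5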
+ @overload + def __add__(self, x: timedelta64[None], /) -> timedelta64[None]: ... + __radd__ = __add__ + + @overload + def __mul__(self: timedelta64[_AnyTD64Item], x: int | np.integer[Any] | np.bool, /) -> timedelta64[_AnyTD64Item]: ... + @overload + def __mul__(self: timedelta64[_AnyTD64Item], x: float | np.floating[Any], /) -> timedelta64[_AnyTD64Item | None]: ... + @overload + def __mul__(self, x: float | np.floating[Any] | np.integer[Any] | np.bool, /) -> timedelta64: ... + __rmul__ = __mul__ + + @overload + def __mod__(self, x: timedelta64[None | L[0]], /) -> timedelta64[None]: ... + @overload + def __mod__(self: timedelta64[None], x: timedelta64, /) -> timedelta64[None]: ... + @overload + def __mod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... + @overload + def __mod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... + @overload + def __mod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... + @overload + def __mod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... + @overload + def __mod__(self, x: timedelta64, /) -> timedelta64: ... + + # the L[0] makes __mod__ non-commutative, which the first two overloads reflect + @overload + def __rmod__(self, x: timedelta64[None], /) -> timedelta64[None]: ... + @overload + def __rmod__(self: timedelta64[None | L[0]], x: timedelta64, /) -> timedelta64[None]: ... + @overload + def __rmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... + @overload + def __rmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... + @overload + def __rmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... + @overload + def __rmod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... + @overload + def __rmod__(self, x: timedelta64, /) -> timedelta64: ... + + # keep in sync with __mod__ + @overload + def __divmod__(self, x: timedelta64[None | L[0]], /) -> tuple[int64, timedelta64[None]]: ... + @overload + def __divmod__(self: timedelta64[None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... + @overload + def __divmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload + def __divmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... + @overload + def __divmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... + @overload + def __divmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload + def __divmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... + + # keep in sync with __rmod__ + @overload + def __rdivmod__(self, x: timedelta64[None], /) -> tuple[int64, timedelta64[None]]: ... + @overload + def __rdivmod__(self: timedelta64[None | L[0]], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... + @overload + def __rdivmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload + def __rdivmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... + @overload + def __rdivmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... 
+ @overload + def __rdivmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload + def __rdivmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... -byte = signedinteger[_NBitByte] -short = signedinteger[_NBitShort] -intc = signedinteger[_NBitIntC] -intp = signedinteger[_NBitIntP] -int_ = intp -long = signedinteger[_NBitLong] -longlong = signedinteger[_NBitLongLong] + @overload + def __sub__(self: timedelta64[None], b: _TD64Like_co, /) -> timedelta64[None]: ... + @overload + def __sub__(self: timedelta64[int], b: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... + @overload + def __sub__(self: timedelta64[int], b: timedelta64, /) -> timedelta64[int | None]: ... + @overload + def __sub__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> dt.timedelta: ... + @overload + def __sub__(self: timedelta64[_AnyTD64Item], b: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... + @overload + def __sub__(self, b: timedelta64[None], /) -> timedelta64[None]: ... -# TODO: `item`/`tolist` returns either `dt.timedelta` or `int` -# depending on the unit -class timedelta64(generic): - def __init__( - self, - value: None | int | _CharLike_co | dt.timedelta | timedelta64 = ..., - format: _CharLike_co | tuple[_CharLike_co, _IntLike_co] = ..., - /, - ) -> None: ... - @property - def numerator(self: _ScalarType) -> _ScalarType: ... - @property - def denominator(self) -> L[1]: ... + @overload + def __rsub__(self: timedelta64[None], a: _TD64Like_co, /) -> timedelta64[None]: ... + @overload + def __rsub__(self: timedelta64[dt.timedelta], a: _AnyDateOrTime, /) -> _AnyDateOrTime: ... + @overload + def __rsub__(self: timedelta64[dt.timedelta], a: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item]: ... + @overload + def __rsub__(self: timedelta64[_AnyTD64Item], a: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... + @overload + def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ... + @overload + def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ... + + @overload + def __truediv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> float: ... + @overload + def __truediv__(self, b: timedelta64, /) -> float64: ... + @overload + def __truediv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ... + @overload + def __truediv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ... + @overload + def __truediv__(self, b: float | floating | integer, /) -> timedelta64: ... + @overload + def __rtruediv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> float: ... + @overload + def __rtruediv__(self, a: timedelta64, /) -> float64: ... + + @overload + def __floordiv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> int: ... + @overload + def __floordiv__(self, b: timedelta64, /) -> int64: ... + @overload + def __floordiv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ... + @overload + def __floordiv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ... + @overload + def __rfloordiv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> int: ... + @overload + def __rfloordiv__(self, a: timedelta64, /) -> int64: ... - # NOTE: Only a limited number of units support conversion - # to builtin scalar types: `Y`, `M`, `ns`, `ps`, `fs`, `as` - def __int__(self) -> int: ... - def __float__(self) -> float: ... 
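Dividing two `timedelta64`s yields a plain ratio (`float64` for `/`, `int64` for `//`), while a zero or NaT divisor is why the `L[0]`/`None` overloads above exist. Example (not part of the patch):

import numpy as np

h = np.timedelta64(1, "h")
h / np.timedelta64(30, "m")   # np.float64(2.0)
h // np.timedelta64(30, "m")  # np.int64(2)
h % np.timedelta64(0, "h")    # NaT (with a RuntimeWarning): see the L[0] overloads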
- def __complex__(self) -> complex: ... - def __neg__(self: _ArraySelf) -> _ArraySelf: ... - def __pos__(self: _ArraySelf) -> _ArraySelf: ... - def __abs__(self: _ArraySelf) -> _ArraySelf: ... - def __add__(self, other: _TD64Like_co, /) -> timedelta64: ... - def __radd__(self, other: _TD64Like_co, /) -> timedelta64: ... - def __sub__(self, other: _TD64Like_co, /) -> timedelta64: ... - def __rsub__(self, other: _TD64Like_co, /) -> timedelta64: ... - def __mul__(self, other: _FloatLike_co, /) -> timedelta64: ... - def __rmul__(self, other: _FloatLike_co, /) -> timedelta64: ... - __truediv__: _TD64Div[float64] - __floordiv__: _TD64Div[int64] - def __rtruediv__(self, other: timedelta64, /) -> float64: ... - def __rfloordiv__(self, other: timedelta64, /) -> int64: ... - def __mod__(self, other: timedelta64, /) -> timedelta64: ... - def __rmod__(self, other: timedelta64, /) -> timedelta64: ... - def __divmod__(self, other: timedelta64, /) -> tuple[int64, timedelta64]: ... - def __rdivmod__(self, other: timedelta64, /) -> tuple[int64, timedelta64]: ... __lt__: _ComparisonOpLT[_TD64Like_co, _ArrayLikeTD64_co] __le__: _ComparisonOpLE[_TD64Like_co, _ArrayLikeTD64_co] __gt__: _ComparisonOpGT[_TD64Like_co, _ArrayLikeTD64_co] __ge__: _ComparisonOpGE[_TD64Like_co, _ArrayLikeTD64_co] -class unsignedinteger(integer[_NBit1]): - # NOTE: `uint64 + signedinteger -> float64` - def __init__(self, value: _IntValue = ..., /) -> None: ... - __add__: _UnsignedIntOp[_NBit1] - __radd__: _UnsignedIntOp[_NBit1] - __sub__: _UnsignedIntOp[_NBit1] - __rsub__: _UnsignedIntOp[_NBit1] - __mul__: _UnsignedIntOp[_NBit1] - __rmul__: _UnsignedIntOp[_NBit1] - __floordiv__: _UnsignedIntOp[_NBit1] - __rfloordiv__: _UnsignedIntOp[_NBit1] - __pow__: _UnsignedIntOp[_NBit1] - __rpow__: _UnsignedIntOp[_NBit1] - __lshift__: _UnsignedIntBitOp[_NBit1] - __rlshift__: _UnsignedIntBitOp[_NBit1] - __rshift__: _UnsignedIntBitOp[_NBit1] - __rrshift__: _UnsignedIntBitOp[_NBit1] - __and__: _UnsignedIntBitOp[_NBit1] - __rand__: _UnsignedIntBitOp[_NBit1] - __xor__: _UnsignedIntBitOp[_NBit1] - __rxor__: _UnsignedIntBitOp[_NBit1] - __or__: _UnsignedIntBitOp[_NBit1] - __ror__: _UnsignedIntBitOp[_NBit1] - __mod__: _UnsignedIntMod[_NBit1] - __rmod__: _UnsignedIntMod[_NBit1] - __divmod__: _UnsignedIntDivMod[_NBit1] - __rdivmod__: _UnsignedIntDivMod[_NBit1] - -uint8: TypeAlias = unsignedinteger[_8Bit] -uint16: TypeAlias = unsignedinteger[_16Bit] -uint32: TypeAlias = unsignedinteger[_32Bit] -uint64: TypeAlias = unsignedinteger[_64Bit] - -ubyte: TypeAlias = unsignedinteger[_NBitByte] -ushort: TypeAlias = unsignedinteger[_NBitShort] -uintc: TypeAlias = unsignedinteger[_NBitIntC] -uintp: TypeAlias = unsignedinteger[_NBitIntP] -uint: TypeAlias = uintp -ulong: TypeAlias = unsignedinteger[_NBitLong] -ulonglong: TypeAlias = unsignedinteger[_NBitLongLong] - -class inexact(number[_NBit1]): # type: ignore - def __getnewargs__(self: inexact[_64Bit]) -> tuple[float, ...]: ... - -_IntType = TypeVar("_IntType", bound=integer[Any]) -_FloatType = TypeVar('_FloatType', bound=floating[Any]) - -class floating(inexact[_NBit1]): - def __init__(self, value: _FloatValue = ..., /) -> None: ... - def item( - self, args: L[0] | tuple[()] | tuple[L[0]] = ..., - /, - ) -> float: ... - def tolist(self) -> float: ... - def is_integer(self) -> builtins.bool: ... - def hex(self: float64) -> str: ... - @classmethod - def fromhex(cls: type[float64], string: str, /) -> float64: ... - def as_integer_ratio(self) -> tuple[int, int]: ... - def __ceil__(self: float64) -> int: ... 
- def __floor__(self: float64) -> int: ... - def __trunc__(self: float64) -> int: ... - def __getnewargs__(self: float64) -> tuple[float]: ... - def __getformat__(self: float64, typestr: L["double", "float"], /) -> str: ... - @overload - def __round__(self, ndigits: None = ..., /) -> int: ... - @overload - def __round__(self: _ScalarType, ndigits: SupportsIndex, /) -> _ScalarType: ... - __add__: _FloatOp[_NBit1] - __radd__: _FloatOp[_NBit1] - __sub__: _FloatOp[_NBit1] - __rsub__: _FloatOp[_NBit1] - __mul__: _FloatOp[_NBit1] - __rmul__: _FloatOp[_NBit1] - __truediv__: _FloatOp[_NBit1] - __rtruediv__: _FloatOp[_NBit1] - __floordiv__: _FloatOp[_NBit1] - __rfloordiv__: _FloatOp[_NBit1] - __pow__: _FloatOp[_NBit1] - __rpow__: _FloatOp[_NBit1] - __mod__: _FloatMod[_NBit1] - __rmod__: _FloatMod[_NBit1] - __divmod__: _FloatDivMod[_NBit1] - __rdivmod__: _FloatDivMod[_NBit1] - -float16: TypeAlias = floating[_16Bit] -float32: TypeAlias = floating[_32Bit] -float64: TypeAlias = floating[_64Bit] +class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): + @property + def itemsize(self) -> L[8]: ... + @property + def nbytes(self) -> L[8]: ... -half: TypeAlias = floating[_NBitHalf] -single: TypeAlias = floating[_NBitSingle] -double: TypeAlias = floating[_NBitDouble] -longdouble: TypeAlias = floating[_NBitLongDouble] + @overload + def __init__(self, value: datetime64[_DT64ItemT_co], /) -> None: ... + @overload + def __init__(self: datetime64[_AnyDT64Arg], value: _AnyDT64Arg, /) -> None: ... + @overload + def __init__(self: datetime64[None], value: _NaTValue | None = ..., format: _TimeUnitSpec = ..., /) -> None: ... + @overload + def __init__(self: datetime64[dt.datetime], value: _DT64Now, format: _TimeUnitSpec[_NativeTimeUnit] = ..., /) -> None: ... + @overload + def __init__(self: datetime64[dt.date], value: _DT64Date, format: _TimeUnitSpec[_DateUnit] = ..., /) -> None: ... + @overload + def __init__(self: datetime64[int], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_IntTimeUnit], /) -> None: ... + @overload + def __init__( + self: datetime64[dt.datetime], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_NativeTimeUnit], / + ) -> None: ... + @overload + def __init__(self: datetime64[dt.date], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_DateUnit], /) -> None: ... + @overload + def __init__(self, value: bytes | str | dt.date | None, format: _TimeUnitSpec = ..., /) -> None: ... -# The main reason for `complexfloating` having two typevars is cosmetic. -# It is used to clarify why `complex128`s precision is `_64Bit`, the latter -# describing the two 64 bit floats representing its real and imaginary component + @overload + def __add__(self: datetime64[_AnyDT64Item], x: int | integer[Any] | np.bool, /) -> datetime64[_AnyDT64Item]: ... + @overload + def __add__(self: datetime64[None], x: _TD64Like_co, /) -> datetime64[None]: ... + @overload + def __add__(self: datetime64[int], x: timedelta64[int | dt.timedelta], /) -> datetime64[int]: ... + @overload + def __add__(self: datetime64[dt.datetime], x: timedelta64[dt.timedelta], /) -> datetime64[dt.datetime]: ... + @overload + def __add__(self: datetime64[dt.date], x: timedelta64[dt.timedelta], /) -> datetime64[dt.date]: ... + @overload + def __add__(self: datetime64[dt.date], x: timedelta64[int], /) -> datetime64[int]: ... + @overload + def __add__(self, x: datetime64[None], /) -> datetime64[None]: ... + @overload + def __add__(self, x: _TD64Like_co, /) -> datetime64: ... 
+ __radd__ = __add__ -class complexfloating(inexact[_NBit1], Generic[_NBit1, _NBit2]): - def __init__(self, value: _ComplexValue = ..., /) -> None: ... - def item( - self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, - ) -> complex: ... - def tolist(self) -> complex: ... - @property - def real(self) -> floating[_NBit1]: ... # type: ignore[override] - @property - def imag(self) -> floating[_NBit2]: ... # type: ignore[override] - def __abs__(self) -> floating[_NBit1]: ... # type: ignore[override] - def __getnewargs__(self: complex128) -> tuple[float, float]: ... - # NOTE: Deprecated - # def __round__(self, ndigits=...): ... - __add__: _ComplexOp[_NBit1] - __radd__: _ComplexOp[_NBit1] - __sub__: _ComplexOp[_NBit1] - __rsub__: _ComplexOp[_NBit1] - __mul__: _ComplexOp[_NBit1] - __rmul__: _ComplexOp[_NBit1] - __truediv__: _ComplexOp[_NBit1] - __rtruediv__: _ComplexOp[_NBit1] - __pow__: _ComplexOp[_NBit1] - __rpow__: _ComplexOp[_NBit1] + @overload + def __sub__(self: datetime64[_AnyDT64Item], x: int | integer[Any] | np.bool, /) -> datetime64[_AnyDT64Item]: ... + @overload + def __sub__(self: datetime64[_AnyDate], x: _AnyDate, /) -> dt.timedelta: ... + @overload + def __sub__(self: datetime64[None], x: timedelta64, /) -> datetime64[None]: ... + @overload + def __sub__(self: datetime64[None], x: datetime64, /) -> timedelta64[None]: ... + @overload + def __sub__(self: datetime64[int], x: timedelta64, /) -> datetime64[int]: ... + @overload + def __sub__(self: datetime64[int], x: datetime64, /) -> timedelta64[int]: ... + @overload + def __sub__(self: datetime64[dt.datetime], x: timedelta64[int], /) -> datetime64[int]: ... + @overload + def __sub__(self: datetime64[dt.datetime], x: timedelta64[dt.timedelta], /) -> datetime64[dt.datetime]: ... + @overload + def __sub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ... + @overload + def __sub__(self: datetime64[dt.date], x: timedelta64[int], /) -> datetime64[dt.date | int]: ... + @overload + def __sub__(self: datetime64[dt.date], x: timedelta64[dt.timedelta], /) -> datetime64[dt.date]: ... + @overload + def __sub__(self: datetime64[dt.date], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ... + @overload + def __sub__(self, x: timedelta64[None], /) -> datetime64[None]: ... + @overload + def __sub__(self, x: datetime64[None], /) -> timedelta64[None]: ... + @overload + def __sub__(self, x: _TD64Like_co, /) -> datetime64: ... + @overload + def __sub__(self, x: datetime64, /) -> timedelta64: ... -complex64: TypeAlias = complexfloating[_32Bit, _32Bit] -complex128: TypeAlias = complexfloating[_64Bit, _64Bit] + @overload + def __rsub__(self: datetime64[_AnyDT64Item], x: int | integer[Any] | np.bool, /) -> datetime64[_AnyDT64Item]: ... + @overload + def __rsub__(self: datetime64[_AnyDate], x: _AnyDate, /) -> dt.timedelta: ... + @overload + def __rsub__(self: datetime64[None], x: datetime64, /) -> timedelta64[None]: ... + @overload + def __rsub__(self: datetime64[int], x: datetime64, /) -> timedelta64[int]: ... + @overload + def __rsub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ... + @overload + def __rsub__(self: datetime64[dt.datetime], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ... + @overload + def __rsub__(self, x: datetime64[None], /) -> timedelta64[None]: ... + @overload + def __rsub__(self, x: datetime64, /) -> timedelta64: ... 
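Like `timedelta64`, `datetime64` now tracks its `item()` type, so the overloads above let date-unit and datetime-unit values subtract to the expected `timedelta64` flavor. Sketch (not part of the patch):

import datetime as dt
import numpy as np

d = np.datetime64(dt.date(2024, 1, 1))      # datetime64[dt.date]
t = np.datetime64(dt.datetime(2024, 1, 1))  # datetime64[dt.datetime]
d - np.datetime64(dt.date(2023, 12, 31))    # timedelta64[dt.timedelta]: 1 day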
-csingle: TypeAlias = complexfloating[_NBitSingle, _NBitSingle] -cdouble: TypeAlias = complexfloating[_NBitDouble, _NBitDouble] -clongdouble: TypeAlias = complexfloating[_NBitLongDouble, _NBitLongDouble] + __lt__: _ComparisonOpLT[datetime64, _ArrayLikeDT64_co] + __le__: _ComparisonOpLE[datetime64, _ArrayLikeDT64_co] + __gt__: _ComparisonOpGT[datetime64, _ArrayLikeDT64_co] + __ge__: _ComparisonOpGE[datetime64, _ArrayLikeDT64_co] -class flexible(generic): ... # type: ignore +class flexible(_RealMixin, generic[_FlexibleItemT_co], Generic[_FlexibleItemT_co]): ... -# TODO: `item`/`tolist` returns either `bytes` or `tuple` -# depending on whether or not it's used as an opaque bytes sequence -# or a structure -class void(flexible): +class void(flexible[bytes | tuple[Any, ...]]): @overload - def __init__(self, value: _IntLike_co | bytes, /, dtype : None = ...) -> None: ... + def __init__(self, value: _IntLike_co | bytes, /, dtype: None = None) -> None: ... @overload def __init__(self, value: Any, /, dtype: _DTypeLikeVoid) -> None: ... - @property - def real(self: _ArraySelf) -> _ArraySelf: ... - @property - def imag(self: _ArraySelf) -> _ArraySelf: ... - def setfield( - self, val: ArrayLike, dtype: DTypeLike, offset: int = ... - ) -> None: ... + @overload def __getitem__(self, key: str | SupportsIndex, /) -> Any: ... @overload def __getitem__(self, key: list[str], /) -> void: ... - def __setitem__( - self, - key: str | list[str] | SupportsIndex, - value: ArrayLike, - /, - ) -> None: ... + def __setitem__(self, key: str | list[str] | SupportsIndex, value: ArrayLike, /) -> None: ... -class character(flexible): # type: ignore - def __int__(self) -> int: ... - def __float__(self) -> float: ... + def setfield(self, val: ArrayLike, dtype: DTypeLike, offset: int = ...) -> None: ... + +class character(flexible[_CharacterItemT_co], Generic[_CharacterItemT_co]): + @abstractmethod + def __init__(self, value: _CharacterItemT_co = ..., /) -> None: ... -# NOTE: Most `np.bytes_` / `np.str_` methods return their -# builtin `bytes` / `str` counterpart +# NOTE: Most `np.bytes_` / `np.str_` methods return their builtin `bytes` / `str` counterpart -class bytes_(character, bytes): +class bytes_(character[bytes], bytes): @overload - def __init__(self, value: object = ..., /) -> None: ... + def __new__(cls, o: object = ..., /) -> Self: ... @overload - def __init__( - self, value: str, /, encoding: str = ..., errors: str = ... - ) -> None: ... - def item( - self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, - ) -> bytes: ... - def tolist(self) -> bytes: ... + def __new__(cls, s: str, /, encoding: str, errors: str = ...) -> Self: ... -class str_(character, str): + # @overload - def __init__(self, value: object = ..., /) -> None: ... + def __init__(self, o: object = ..., /) -> None: ... @overload - def __init__( - self, value: bytes, /, encoding: str = ..., errors: str = ... - ) -> None: ... - def item( - self, args: L[0] | tuple[()] | tuple[L[0]] = ..., /, - ) -> str: ... - def tolist(self) -> str: ... - -# -# Constants -# + def __init__(self, s: str, /, encoding: str, errors: str = ...) -> None: ... -e: Final[float] -euler_gamma: Final[float] -inf: Final[float] -nan: Final[float] -pi: Final[float] + # + def __bytes__(self, /) -> bytes: ... -little_endian: Final[builtins.bool] -True_: Final[np.bool] -False_: Final[np.bool] +class str_(character[str], str): + @overload + def __new__(cls, value: object = ..., /) -> Self: ... + @overload + def __new__(cls, value: bytes, /, encoding: str = ..., errors: str = ...) 
-> Self: ... -newaxis: None + # + @overload + def __init__(self, value: object = ..., /) -> None: ... + @overload + def __init__(self, value: bytes, /, encoding: str = ..., errors: str = ...) -> None: ... # See `numpy._typing._ufunc` for more concrete nin-/nout-specific stubs @final @@ -3356,6 +4761,8 @@ class ufunc: @property def __name__(self) -> LiteralString: ... @property + def __qualname__(self) -> LiteralString: ... + @property def __doc__(self) -> str: ... @property def nin(self) -> int: ... @@ -3388,13 +4795,13 @@ class ufunc: # raise a ValueError ufuncs with that don't accept two input # arguments and return one output argument. Because of that we # can't type them very precisely. - def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn | Any: ... - def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn | NDArray[Any]: ... - def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn | NDArray[Any]: ... - def outer(self, *args: Any, **kwargs: Any) -> NoReturn | Any: ... + def reduce(self, /, *args: Any, **kwargs: Any) -> Any: ... + def accumulate(self, /, *args: Any, **kwargs: Any) -> NDArray[Any]: ... + def reduceat(self, /, *args: Any, **kwargs: Any) -> NDArray[Any]: ... + def outer(self, *args: Any, **kwargs: Any) -> Any: ... # Similarly at won't be defined for ufuncs that return multiple # outputs, so we can't type it very precisely. - def at(self, /, *args: Any, **kwargs: Any) -> NoReturn | None: ... + def at(self, /, *args: Any, **kwargs: Any) -> None: ... # Parameters: `__name__`, `ntypes` and `identity` absolute: _UFunc_Nin1_Nout1[L['absolute'], L[20], None] @@ -3460,6 +4867,7 @@ logical_not: _UFunc_Nin1_Nout1[L['logical_not'], L[20], None] logical_or: _UFunc_Nin2_Nout1[L['logical_or'], L[20], L[False]] logical_xor: _UFunc_Nin2_Nout1[L['logical_xor'], L[19], L[False]] matmul: _GUFunc_Nin2_Nout1[L['matmul'], L[19], None, L["(n?,k),(k,m?)->(n?,m?)"]] +matvec: _GUFunc_Nin2_Nout1[L['matvec'], L[19], None, L["(m,n),(n)->(m)"]] maximum: _UFunc_Nin2_Nout1[L['maximum'], L[21], None] minimum: _UFunc_Nin2_Nout1[L['minimum'], L[21], None] mod: _UFunc_Nin2_Nout1[L['remainder'], L[16], None] @@ -3489,6 +4897,7 @@ tanh: _UFunc_Nin1_Nout1[L['tanh'], L[8], None] true_divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] trunc: _UFunc_Nin1_Nout1[L['trunc'], L[7], None] vecdot: _GUFunc_Nin2_Nout1[L['vecdot'], L[19], None, L["(n),(n)->()"]] +vecmat: _GUFunc_Nin2_Nout1[L['vecmat'], L[19], None, L["(n),(n,m)->(m)"]] abs = absolute acos = arccos @@ -3505,18 +4914,11 @@ bitwise_right_shift = right_shift permute_dims = transpose pow = power -class _CopyMode(enum.Enum): - ALWAYS: L[True] - IF_NEEDED: L[False] - NEVER: L[2] - -_CallType = TypeVar("_CallType", bound=Callable[..., Any]) - class errstate: def __init__( self, *, - call: _ErrFunc | _SupportsWrite[str] = ..., + call: _ErrCall = ..., all: None | _ErrKind = ..., divide: None | _ErrKind = ..., over: None | _ErrKind = ..., @@ -3531,23 +4933,16 @@ class errstate: traceback: None | TracebackType, /, ) -> None: ... - def __call__(self, func: _CallType) -> _CallType: ... - -@contextmanager -def _no_nep50_warning() -> Generator[None, None, None]: ... -def _get_promotion_state() -> str: ... -def _set_promotion_state(state: str, /) -> None: ... - -_ScalarType_co = TypeVar("_ScalarType_co", bound=generic, covariant=True) + def __call__(self, func: _CallableT) -> _CallableT: ... -class ndenumerate(Generic[_ScalarType_co]): +class ndenumerate(Generic[_SCT_co]): @property - def iter(self) -> flatiter[NDArray[_ScalarType_co]]: ... 
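The newly registered `matvec` and `vecmat` gufuncs follow the signatures declared above. Usage sketch (assumes NumPy >= 2.2, where both were added; not part of the patch):

import numpy as np

A = np.arange(6.0).reshape(2, 3)
np.matvec(A, np.ones(3))  # "(m,n),(n)->(m)": matrix @ vector over the last axes
np.vecmat(np.ones(2), A)  # "(n),(n,m)->(m)": vector (conjugated if complex) @ matrix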
+ def iter(self) -> flatiter[NDArray[_SCT_co]]: ... @overload def __new__( - cls, arr: _FiniteNestedSequence[_SupportsArray[dtype[_ScalarType]]], - ) -> ndenumerate[_ScalarType]: ... + cls, arr: _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]], + ) -> ndenumerate[_SCT]: ... @overload def __new__(cls, arr: str | _NestedSequence[str]) -> ndenumerate[str_]: ... @overload @@ -3568,20 +4963,20 @@ class ndenumerate(Generic[_ScalarType_co]): def __next__( self: ndenumerate[np.bool | datetime64 | timedelta64 | number[Any] | flexible], /, - ) -> tuple[_Shape, _ScalarType_co]: ... + ) -> tuple[_Shape, _SCT_co]: ... @overload def __next__(self: ndenumerate[object_], /) -> tuple[_Shape, Any]: ... @overload - def __next__(self, /) -> tuple[_Shape, _ScalarType_co]: ... + def __next__(self, /) -> tuple[_Shape, _SCT_co]: ... - def __iter__(self: _T) -> _T: ... + def __iter__(self) -> Self: ... class ndindex: @overload def __init__(self, shape: tuple[SupportsIndex, ...], /) -> None: ... @overload def __init__(self, *shape: SupportsIndex) -> None: ... - def __iter__(self: _T) -> _T: ... + def __iter__(self) -> Self: ... def __next__(self) -> _Shape: ... # TODO: The type of each `__next__` and `iters` return-type depends @@ -3605,7 +5000,7 @@ class broadcast: @property def size(self) -> int: ... def __next__(self) -> tuple[Any, ...]: ... - def __iter__(self: _T) -> _T: ... + def __iter__(self) -> Self: ... def reset(self) -> None: ... @final @@ -3620,27 +5015,27 @@ class busdaycalendar: @property def holidays(self) -> NDArray[datetime64]: ... -class finfo(Generic[_FloatType]): - dtype: dtype[_FloatType] - bits: int - eps: _FloatType - epsneg: _FloatType - iexp: int - machep: int - max: _FloatType - maxexp: int - min: _FloatType - minexp: int - negep: int - nexp: int - nmant: int - precision: int - resolution: _FloatType - smallest_subnormal: _FloatType - @property - def smallest_normal(self) -> _FloatType: ... - @property - def tiny(self) -> _FloatType: ... +class finfo(Generic[_FloatingT_co]): + dtype: Final[dtype[_FloatingT_co]] + bits: Final[int] + eps: Final[_FloatingT_co] + epsneg: Final[_FloatingT_co] + iexp: Final[int] + machep: Final[int] + max: Final[_FloatingT_co] + maxexp: Final[int] + min: Final[_FloatingT_co] + minexp: Final[int] + negep: Final[int] + nexp: Final[int] + nmant: Final[int] + precision: Final[int] + resolution: Final[_FloatingT_co] + smallest_subnormal: Final[_FloatingT_co] + @property + def smallest_normal(self) -> _FloatingT_co: ... + @property + def tiny(self) -> _FloatingT_co: ... @overload def __new__( cls, dtype: inexact[_NBit1] | _DTypeLike[inexact[_NBit1]] @@ -3654,64 +5049,33 @@ class finfo(Generic[_FloatType]): cls, dtype: str ) -> finfo[floating[Any]]: ... -class iinfo(Generic[_IntType]): - dtype: dtype[_IntType] - kind: LiteralString - bits: int - key: LiteralString + +class iinfo(Generic[_IntegerT_co]): + dtype: Final[dtype[_IntegerT_co]] + kind: Final[LiteralString] + bits: Final[int] + key: Final[LiteralString] @property def min(self) -> int: ... @property def max(self) -> int: ... @overload - def __new__(cls, dtype: _IntType | _DTypeLike[_IntType]) -> iinfo[_IntType]: ... + def __new__( + cls, dtype: _IntegerT_co | _DTypeLike[_IntegerT_co] + ) -> iinfo[_IntegerT_co]: ... @overload def __new__(cls, dtype: int | type[int]) -> iinfo[int_]: ... @overload def __new__(cls, dtype: str) -> iinfo[Any]: ... 
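With `finfo` and `iinfo` now parameterized by their scalar type and their attributes declared `Final`, type checkers can track machine-limit queries precisely. A small usage sketch (standard NumPy API, shown only for illustration):

    import numpy as np

    fi = np.finfo(np.float32)      # inferred as finfo[float32]
    fi.eps, fi.max, fi.smallest_normal   # read-only float32 limits

    ii = np.iinfo(np.int16)        # inferred as iinfo[int16]
    ii.min, ii.max                 # -32768, 32767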
-_NDIterFlagsKind: TypeAlias = L[ - "buffered", - "c_index", - "copy_if_overlap", - "common_dtype", - "delay_bufalloc", - "external_loop", - "f_index", - "grow_inner", "growinner", - "multi_index", - "ranged", - "refs_ok", - "reduce_ok", - "zerosize_ok", -] - -_NDIterOpFlagsKind: TypeAlias = L[ - "aligned", - "allocate", - "arraymask", - "copy", - "config", - "nbo", - "no_subtype", - "no_broadcast", - "overlap_assume_elementwise", - "readonly", - "readwrite", - "updateifcopy", - "virtual", - "writeonly", - "writemasked" -] - @final class nditer: def __new__( cls, - op: ArrayLike | Sequence[ArrayLike], + op: ArrayLike | Sequence[ArrayLike | None], flags: None | Sequence[_NDIterFlagsKind] = ..., - op_flags: None | Sequence[Sequence[_NDIterOpFlagsKind]] = ..., + op_flags: None | Sequence[Sequence[_NDIterFlagsOp]] = ..., op_dtypes: DTypeLike | Sequence[DTypeLike] = ..., order: _OrderKACF = ..., casting: _CastingKind = ..., @@ -3778,14 +5142,7 @@ class nditer: @property def value(self) -> tuple[NDArray[Any], ...]: ... -_MemMapModeKind: TypeAlias = L[ - "readonly", "r", - "copyonwrite", "c", - "readwrite", "r+", - "write", "w+", -] - -class memmap(ndarray[_ShapeType_co, _DType_co]): +class memmap(ndarray[_ShapeT_co, _DType_co]): __array_priority__: ClassVar[float] filename: str | None offset: int @@ -3793,7 +5150,7 @@ class memmap(ndarray[_ShapeType_co, _DType_co]): @overload def __new__( subtype, - filename: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _MemMapIOProtocol, + filename: StrOrBytesPath | _SupportsFileMethodsRW, dtype: type[uint8] = ..., mode: _MemMapModeKind = ..., offset: int = ..., @@ -3803,17 +5160,17 @@ class memmap(ndarray[_ShapeType_co, _DType_co]): @overload def __new__( subtype, - filename: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _MemMapIOProtocol, - dtype: _DTypeLike[_ScalarType], + filename: StrOrBytesPath | _SupportsFileMethodsRW, + dtype: _DTypeLike[_SCT], mode: _MemMapModeKind = ..., offset: int = ..., shape: None | int | tuple[int, ...] = ..., order: _OrderKACF = ..., - ) -> memmap[Any, dtype[_ScalarType]]: ... + ) -> memmap[Any, dtype[_SCT]]: ... @overload def __new__( subtype, - filename: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _MemMapIOProtocol, + filename: StrOrBytesPath | _SupportsFileMethodsRW, dtype: DTypeLike, mode: _MemMapModeKind = ..., offset: int = ..., @@ -3823,7 +5180,7 @@ class memmap(ndarray[_ShapeType_co, _DType_co]): def __array_finalize__(self, obj: object) -> None: ... def __array_wrap__( self, - array: memmap[_ShapeType_co, _DType_co], + array: memmap[_ShapeT_co, _DType_co], context: None | tuple[ufunc, tuple[Any, ...], int] = ..., return_scalar: builtins.bool = ..., ) -> Any: ... @@ -3881,13 +5238,12 @@ class poly1d: @coefficients.setter def coefficients(self, value: NDArray[Any]) -> None: ... - __hash__: ClassVar[None] # type: ignore + __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] - # TODO: use `tuple[int]` as shape type once covariant (#26081) @overload - def __array__(self, t: None = ..., copy: None | bool = ...) -> NDArray[Any]: ... + def __array__(self, /, t: None = None, copy: builtins.bool | None = None) -> ndarray[tuple[int], dtype[Any]]: ... @overload - def __array__(self, t: _DType, copy: None | bool = ...) -> ndarray[Any, _DType]: ... + def __array__(self, /, t: _DType, copy: builtins.bool | None = None) -> ndarray[tuple[int], _DType]: ... @overload def __call__(self, val: _ScalarLike_co) -> Any: ... 
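The relaxed `op` annotation on `nditer` above (`Sequence[ArrayLike | None]`) matches the long-standing runtime behavior where a `None` operand, combined with the "allocate" flag, asks the iterator to allocate that output itself. A sketch of the pattern (ordinary `nditer` usage, not code from this diff):

    import numpy as np

    a = np.arange(6.0)
    with np.nditer([a, None],
                   op_flags=[["readonly"], ["writeonly", "allocate"]]) as it:
        for x, y in it:
            y[...] = 2 * x
        doubled = it.operands[1]   # the array the iterator allocated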
@@ -3927,15 +5283,14 @@ class poly1d: ) -> poly1d: ... - -class matrix(ndarray[_Shape2DType_co, _DType_co]): +class matrix(ndarray[_2DShapeT_co, _DType_co]): __array_priority__: ClassVar[float] def __new__( subtype, data: ArrayLike, dtype: DTypeLike = ..., copy: builtins.bool = ..., - ) -> matrix[Any, Any]: ... + ) -> matrix[_2D, Any]: ... def __array_finalize__(self, obj: object) -> None: ... @overload @@ -3954,142 +5309,134 @@ class matrix(ndarray[_Shape2DType_co, _DType_co]): key: ( None | slice - | ellipsis + | EllipsisType | SupportsIndex | _ArrayLikeInt_co - | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...] + | tuple[None | slice | EllipsisType | _ArrayLikeInt_co | SupportsIndex, ...] ), /, - ) -> matrix[Any, _DType_co]: ... + ) -> matrix[_2D, _DType_co]: ... @overload - def __getitem__(self: NDArray[void], key: str, /) -> matrix[Any, dtype[Any]]: ... + def __getitem__(self: NDArray[void], key: str, /) -> matrix[_2D, dtype[Any]]: ... @overload - def __getitem__(self: NDArray[void], key: list[str], /) -> matrix[_Shape2DType_co, dtype[void]]: ... + def __getitem__(self: NDArray[void], key: list[str], /) -> matrix[_2DShapeT_co, dtype[void]]: ... - def __mul__(self, other: ArrayLike, /) -> matrix[Any, Any]: ... - def __rmul__(self, other: ArrayLike, /) -> matrix[Any, Any]: ... - def __imul__(self, other: ArrayLike, /) -> matrix[_Shape2DType_co, _DType_co]: ... - def __pow__(self, other: ArrayLike, /) -> matrix[Any, Any]: ... - def __ipow__(self, other: ArrayLike, /) -> matrix[_Shape2DType_co, _DType_co]: ... + def __mul__(self, other: ArrayLike, /) -> matrix[_2D, Any]: ... + def __rmul__(self, other: ArrayLike, /) -> matrix[_2D, Any]: ... + def __imul__(self, other: ArrayLike, /) -> matrix[_2DShapeT_co, _DType_co]: ... + def __pow__(self, other: ArrayLike, /) -> matrix[_2D, Any]: ... + def __ipow__(self, other: ArrayLike, /) -> matrix[_2DShapeT_co, _DType_co]: ... @overload def sum(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ... @overload - def sum(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[Any, Any]: ... + def sum(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[_2D, Any]: ... @overload - def sum(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + def sum(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _ArrayT = ...) -> _ArrayT: ... @overload def mean(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ... @overload - def mean(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[Any, Any]: ... + def mean(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[_2D, Any]: ... @overload - def mean(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + def mean(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _ArrayT = ...) -> _ArrayT: ... @overload def std(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> Any: ... @overload - def std(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> matrix[Any, Any]: ... + def std(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> matrix[_2D, Any]: ... @overload - def std(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ddof: float = ...) -> _NdArraySubClass: ... 
+ def std(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _ArrayT = ..., ddof: float = ...) -> _ArrayT: ... @overload def var(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> Any: ... @overload - def var(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> matrix[Any, Any]: ... + def var(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ..., ddof: float = ...) -> matrix[_2D, Any]: ... @overload - def var(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ..., ddof: float = ...) -> _NdArraySubClass: ... + def var(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _ArrayT = ..., ddof: float = ...) -> _ArrayT: ... @overload def prod(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ... @overload - def prod(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[Any, Any]: ... + def prod(self, axis: _ShapeLike, dtype: DTypeLike = ..., out: None = ...) -> matrix[_2D, Any]: ... @overload - def prod(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + def prod(self, axis: None | _ShapeLike = ..., dtype: DTypeLike = ..., out: _ArrayT = ...) -> _ArrayT: ... @overload def any(self, axis: None = ..., out: None = ...) -> np.bool: ... @overload - def any(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[np.bool]]: ... + def any(self, axis: _ShapeLike, out: None = ...) -> matrix[_2D, dtype[np.bool]]: ... @overload - def any(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + def any(self, axis: None | _ShapeLike = ..., out: _ArrayT = ...) -> _ArrayT: ... @overload def all(self, axis: None = ..., out: None = ...) -> np.bool: ... @overload - def all(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[np.bool]]: ... + def all(self, axis: _ShapeLike, out: None = ...) -> matrix[_2D, dtype[np.bool]]: ... @overload - def all(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + def all(self, axis: None | _ShapeLike = ..., out: _ArrayT = ...) -> _ArrayT: ... @overload - def max(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> _ScalarType: ... + def max(self: NDArray[_SCT], axis: None = ..., out: None = ...) -> _SCT: ... @overload - def max(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, _DType_co]: ... + def max(self, axis: _ShapeLike, out: None = ...) -> matrix[_2D, _DType_co]: ... @overload - def max(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + def max(self, axis: None | _ShapeLike = ..., out: _ArrayT = ...) -> _ArrayT: ... @overload - def min(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> _ScalarType: ... + def min(self: NDArray[_SCT], axis: None = ..., out: None = ...) -> _SCT: ... @overload - def min(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, _DType_co]: ... + def min(self, axis: _ShapeLike, out: None = ...) -> matrix[_2D, _DType_co]: ... @overload - def min(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + def min(self, axis: None | _ShapeLike = ..., out: _ArrayT = ...) -> _ArrayT: ... @overload - def argmax(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> intp: ... + def argmax(self: NDArray[_SCT], axis: None = ..., out: None = ...) -> intp: ... 
@overload - def argmax(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[intp]]: ... + def argmax(self, axis: _ShapeLike, out: None = ...) -> matrix[_2D, dtype[intp]]: ... @overload - def argmax(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + def argmax(self, axis: None | _ShapeLike = ..., out: _ArrayT = ...) -> _ArrayT: ... @overload - def argmin(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> intp: ... + def argmin(self: NDArray[_SCT], axis: None = ..., out: None = ...) -> intp: ... @overload - def argmin(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, dtype[intp]]: ... + def argmin(self, axis: _ShapeLike, out: None = ...) -> matrix[_2D, dtype[intp]]: ... @overload - def argmin(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + def argmin(self, axis: None | _ShapeLike = ..., out: _ArrayT = ...) -> _ArrayT: ... @overload - def ptp(self: NDArray[_ScalarType], axis: None = ..., out: None = ...) -> _ScalarType: ... + def ptp(self: NDArray[_SCT], axis: None = ..., out: None = ...) -> _SCT: ... @overload - def ptp(self, axis: _ShapeLike, out: None = ...) -> matrix[Any, _DType_co]: ... + def ptp(self, axis: _ShapeLike, out: None = ...) -> matrix[_2D, _DType_co]: ... @overload - def ptp(self, axis: None | _ShapeLike = ..., out: _NdArraySubClass = ...) -> _NdArraySubClass: ... + def ptp(self, axis: None | _ShapeLike = ..., out: _ArrayT = ...) -> _ArrayT: ... - def squeeze(self, axis: None | _ShapeLike = ...) -> matrix[Any, _DType_co]: ... - def tolist(self: matrix[Any, dtype[_SupportsItem[_T]]]) -> list[list[_T]]: ... # type: ignore[typevar] - def ravel(self, order: _OrderKACF = ...) -> matrix[Any, _DType_co]: ... - def flatten(self, order: _OrderKACF = ...) -> matrix[Any, _DType_co]: ... + def squeeze(self, axis: None | _ShapeLike = ...) -> matrix[_2D, _DType_co]: ... + def tolist(self: _SupportsItem[_T]) -> list[list[_T]]: ... + def ravel(self, /, order: _OrderKACF = "C") -> matrix[tuple[L[1], int], _DType_co]: ... # pyright: ignore[reportIncompatibleMethodOverride] + def flatten(self, /, order: _OrderKACF = "C") -> matrix[tuple[L[1], int], _DType_co]: ... # pyright: ignore[reportIncompatibleMethodOverride] @property - def T(self) -> matrix[Any, _DType_co]: ... + def T(self) -> matrix[_2D, _DType_co]: ... @property - def I(self) -> matrix[Any, Any]: ... + def I(self) -> matrix[_2D, Any]: ... @property - def A(self) -> ndarray[_Shape2DType_co, _DType_co]: ... + def A(self) -> ndarray[_2DShapeT_co, _DType_co]: ... @property - def A1(self) -> ndarray[Any, _DType_co]: ... + def A1(self) -> ndarray[_Shape, _DType_co]: ... @property - def H(self) -> matrix[Any, _DType_co]: ... - def getT(self) -> matrix[Any, _DType_co]: ... - def getI(self) -> matrix[Any, Any]: ... - def getA(self) -> ndarray[_Shape2DType_co, _DType_co]: ... - def getA1(self) -> ndarray[Any, _DType_co]: ... - def getH(self) -> matrix[Any, _DType_co]: ... - -_CharType = TypeVar("_CharType", str_, bytes_) -_CharDType = TypeVar("_CharDType", dtype[str_], dtype[bytes_]) + def H(self) -> matrix[_2D, _DType_co]: ... + def getT(self) -> matrix[_2D, _DType_co]: ... + def getI(self) -> matrix[_2D, Any]: ... + def getA(self) -> ndarray[_2DShapeT_co, _DType_co]: ... + def getA1(self) -> ndarray[_Shape, _DType_co]: ... + def getH(self) -> matrix[_2D, _DType_co]: ... -# NOTE: Deprecated -# class MachAr: ... - -class _SupportsDLPack(Protocol[_T_contra]): - def __dlpack__(self, *, stream: None | _T_contra = ...) 
-> _PyCapsule: ... def from_dlpack( - obj: _SupportsDLPack[None], + x: _SupportsDLPack[None], /, *, - device: L["cpu"] | None = ..., - copy: bool | None = ..., -) -> NDArray[Any]: ... + device: L["cpu"] | None = None, + copy: builtins.bool | None = None, +) -> NDArray[number[Any] | np.bool]: ... diff --git a/numpy/_array_api_info.pyi b/numpy/_array_api_info.pyi index 52b98fc0039b..e9c17a6f18ce 100644 --- a/numpy/_array_api_info.pyi +++ b/numpy/_array_api_info.pyi @@ -1,6 +1,4 @@ -import sys from typing import ( - TYPE_CHECKING, ClassVar, Literal, TypeAlias, @@ -8,18 +6,12 @@ from typing import ( TypeVar, final, overload, + type_check_only, ) +from typing_extensions import Never import numpy as np -if sys.version_info >= (3, 11): - from typing import Never -elif TYPE_CHECKING: - from typing_extensions import Never -else: - # `NoReturn` and `Never` are equivalent (but not equal) for type-checkers, - # but are used in different places by convention - from typing import NoReturn as Never _Device: TypeAlias = Literal["cpu"] _DeviceLike: TypeAlias = None | _Device @@ -72,38 +64,44 @@ _Permute3: TypeAlias = ( | tuple[_T3, _T1, _T2] | tuple[_T3, _T2, _T1] ) +@type_check_only class _DTypesBool(TypedDict): bool: np.dtype[np.bool] +@type_check_only class _DTypesInt(TypedDict): int8: np.dtype[np.int8] int16: np.dtype[np.int16] int32: np.dtype[np.int32] int64: np.dtype[np.int64] +@type_check_only class _DTypesUInt(TypedDict): uint8: np.dtype[np.uint8] uint16: np.dtype[np.uint16] uint32: np.dtype[np.uint32] uint64: np.dtype[np.uint64] -class _DTypesInteger(_DTypesInt, _DTypesUInt): - ... +@type_check_only +class _DTypesInteger(_DTypesInt, _DTypesUInt): ... +@type_check_only class _DTypesFloat(TypedDict): float32: np.dtype[np.float32] float64: np.dtype[np.float64] +@type_check_only class _DTypesComplex(TypedDict): complex64: np.dtype[np.complex64] complex128: np.dtype[np.complex128] -class _DTypesNumber(_DTypesInteger, _DTypesFloat, _DTypesComplex): - ... +@type_check_only +class _DTypesNumber(_DTypesInteger, _DTypesFloat, _DTypesComplex): ... -class _DTypes(_DTypesBool, _DTypesNumber): - ... +@type_check_only +class _DTypes(_DTypesBool, _DTypesNumber): ... 
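For the `from_dlpack` signature above, note that NumPy arrays themselves implement `__dlpack__`, so a plain round trip illustrates the zero-copy import (usage sketch only):

    import numpy as np

    a = np.arange(4)
    b = np.from_dlpack(a)      # imported via the DLPack protocol
    np.shares_memory(a, b)     # True: no copy was made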
+@type_check_only class _DTypesUnion(TypedDict, total=False): bool: np.dtype[np.bool] int8: np.dtype[np.int8] @@ -121,7 +119,6 @@ class _DTypesUnion(TypedDict, total=False): _EmptyDict: TypeAlias = dict[Never, Never] - @final class __array_namespace_info__: __module__: ClassVar[Literal['numpy']] diff --git a/numpy/_build_utils/gcc_build_bitness.py b/numpy/_build_utils/gcc_build_bitness.py index fcad237e98bc..08d02d4d813f 100644 --- a/numpy/_build_utils/gcc_build_bitness.py +++ b/numpy/_build_utils/gcc_build_bitness.py @@ -3,7 +3,7 @@ """ import re -from subprocess import run, PIPE +from subprocess import run def main(): diff --git a/numpy/_build_utils/process_src_template.py b/numpy/_build_utils/process_src_template.py index 4a0915e25254..259c4eaa1628 100644 --- a/numpy/_build_utils/process_src_template.py +++ b/numpy/_build_utils/process_src_template.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -import sys import os import argparse import importlib.util diff --git a/numpy/_build_utils/tempita.py b/numpy/_build_utils/tempita.py index 0743b892436b..32e400f9c907 100644 --- a/numpy/_build_utils/tempita.py +++ b/numpy/_build_utils/tempita.py @@ -3,9 +3,7 @@ import os import argparse -from Cython import Tempita as tempita - -# XXX: If this import ever fails (does it really?), vendor cython.tempita +import tempita def process_tempita(fromfile, outfile=None): diff --git a/numpy/_build_utils/tempita/LICENSE.txt b/numpy/_build_utils/tempita/LICENSE.txt new file mode 100644 index 000000000000..0ba6f23c440f --- /dev/null +++ b/numpy/_build_utils/tempita/LICENSE.txt @@ -0,0 +1,20 @@ +Copyright (c) 2008 Ian Bicking and Contributors + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/numpy/_build_utils/tempita/__init__.py b/numpy/_build_utils/tempita/__init__.py new file mode 100644 index 000000000000..41a0ce3d0efa --- /dev/null +++ b/numpy/_build_utils/tempita/__init__.py @@ -0,0 +1,4 @@ +# The original Tempita implements all of its templating code here. +# Moved it to _tempita.py to make the compilation portable. + +from ._tempita import * diff --git a/numpy/_build_utils/tempita/_looper.py b/numpy/_build_utils/tempita/_looper.py new file mode 100644 index 000000000000..4864f2949605 --- /dev/null +++ b/numpy/_build_utils/tempita/_looper.py @@ -0,0 +1,156 @@ +""" +Helper for looping over sequences, particular in templates. + +Often in a loop in a template it's handy to know what's next up, +previously up, if this is the first or last item in the sequence, etc. 
+These can be awkward to manage in a normal Python loop, but using the
+looper you can get a better sense of the context.  Use like::
+
+    >>> for loop, item in looper(['a', 'b', 'c']):
+    ...     print(loop.number, item)
+    ...     if not loop.last:
+    ...         print('---')
+    1 a
+    ---
+    2 b
+    ---
+    3 c
+
+"""
+
+basestring_ = (bytes, str)
+
+__all__ = ['looper']
+
+
+class looper:
+    """
+    Helper for looping (particularly in templates)
+
+    Use this like::
+
+        for loop, item in looper(seq):
+            if loop.first:
+                ...
+    """
+
+    def __init__(self, seq):
+        self.seq = seq
+
+    def __iter__(self):
+        return looper_iter(self.seq)
+
+    def __repr__(self):
+        return '<%s for %r>' % (
+            self.__class__.__name__, self.seq)
+
+
+class looper_iter:
+
+    def __init__(self, seq):
+        self.seq = list(seq)
+        self.pos = 0
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        if self.pos >= len(self.seq):
+            raise StopIteration
+        result = loop_pos(self.seq, self.pos), self.seq[self.pos]
+        self.pos += 1
+        return result
+
+
+class loop_pos:
+
+    def __init__(self, seq, pos):
+        self.seq = seq
+        self.pos = pos
+
+    def __repr__(self):
+        return '<loop pos=%r at %r>' % (
+            self.seq[self.pos], self.pos)
+
+    def index(self):
+        return self.pos
+    index = property(index)
+
+    def number(self):
+        return self.pos + 1
+    number = property(number)
+
+    def item(self):
+        return self.seq[self.pos]
+    item = property(item)
+
+    def __next__(self):
+        try:
+            return self.seq[self.pos + 1]
+        except IndexError:
+            return None
+    __next__ = property(__next__)
+
+    def previous(self):
+        if self.pos == 0:
+            return None
+        return self.seq[self.pos - 1]
+    previous = property(previous)
+
+    def odd(self):
+        return not self.pos % 2
+    odd = property(odd)
+
+    def even(self):
+        return self.pos % 2
+    even = property(even)
+
+    def first(self):
+        return self.pos == 0
+    first = property(first)
+
+    def last(self):
+        return self.pos == len(self.seq) - 1
+    last = property(last)
+
+    def length(self):
+        return len(self.seq)
+    length = property(length)
+
+    def first_group(self, getter=None):
+        """
+        Returns true if this item is the start of a new group,
+        where groups mean that some attribute has changed.  The getter
+        can be None (the item itself changes), an attribute name like
+        ``'.attr'``, a function, or a dict key or list index.
+        """
+        if self.first:
+            return True
+        return self._compare_group(self.item, self.previous, getter)
+
+    def last_group(self, getter=None):
+        """
+        Returns true if this item is the end of a new group,
+        where groups mean that some attribute has changed.  The getter
+        can be None (the item itself changes), an attribute name like
+        ``'.attr'``, a function, or a dict key or list index.
+        """
+        if self.last:
+            return True
+        return self._compare_group(self.item, self.__next__, getter)
+
+    def _compare_group(self, item, other, getter):
+        if getter is None:
+            return item != other
+        elif (isinstance(getter, basestring_)
+              and getter.startswith('.')):
+            getter = getter[1:]
+            if getter.endswith('()'):
+                getter = getter[:-2]
+                return getattr(item, getter)() != getattr(other, getter)()
+            else:
+                return getattr(item, getter) != getattr(other, getter)
+        elif hasattr(getter, '__call__'):
+            return getter(item) != getter(other)
+        else:
+            return item[getter] != other[getter]
diff --git a/numpy/_build_utils/tempita/_tempita.py b/numpy/_build_utils/tempita/_tempita.py
new file mode 100644
index 000000000000..c30b6547ade6
--- /dev/null
+++ b/numpy/_build_utils/tempita/_tempita.py
@@ -0,0 +1,1132 @@
+"""
+A small templating language
+
+This implements a small templating language.  This language implements
+if/elif/else, for/continue/break, expressions, and blocks of Python
+code.  The syntax is::
+
+    {{any expression (function calls etc)}}
+    {{any expression | filter}}
+    {{for x in y}}...{{endfor}}
+    {{if x}}x{{elif y}}y{{else}}z{{endif}}
+    {{py:x=1}}
+    {{py:
+        def foo(bar):
+            return 'baz'
+    }}
+    {{default var = default_value}}
+    {{# comment}}
+
+You use this with the ``Template`` class or the ``sub`` shortcut.
+The ``Template`` class takes the template string and the name of
+the template (for errors) and a default namespace.  Then (like
+``string.Template``) you can call the ``tmpl.substitute(**kw)``
+method to make a substitution (or ``tmpl.substitute(a_dict)``).
+
+``sub(content, **kw)`` substitutes the template immediately.  You
+can use ``__name='tmpl.html'`` to set the name of the template.
+
+If there are syntax errors ``TemplateError`` will be raised.
+"""
+
+import re
+import sys
+import os
+import tokenize
+from io import StringIO
+
+from ._looper import looper
+
+__all__ = ["TemplateError", "Template", "sub", "bunch"]
+
+in_re = re.compile(r"\s+in\s+")
+var_re = re.compile(r"^[a-z_][a-z0-9_]*$", re.I)
+basestring_ = (bytes, str)
+
+
+def coerce_text(v):
+    if not isinstance(v, basestring_):
+        if hasattr(v, "__str__"):
+            return str(v)
+        else:
+            return bytes(v)
+    return v
+
+
+class TemplateError(Exception):
+    """Exception raised while parsing a template"""
+
+    def __init__(self, message, position, name=None):
+        Exception.__init__(self, message)
+        self.position = position
+        self.name = name
+
+    def __str__(self):
+        msg = " ".join(self.args)
+        if self.position:
+            msg = "%s at line %s column %s" % (msg, self.position[0], self.position[1])
+        if self.name:
+            msg += " in %s" % self.name
+        return msg
+
+
+class _TemplateContinue(Exception):
+    pass
+
+
+class _TemplateBreak(Exception):
+    pass
+
+
+def get_file_template(name, from_template):
+    path = os.path.join(os.path.dirname(from_template.name), name)
+    return from_template.__class__.from_filename(
+        path, namespace=from_template.namespace, get_template=from_template.get_template
+    )
+
+
+class Template:
+    default_namespace = {
+        "start_braces": "{{",
+        "end_braces": "}}",
+        "looper": looper,
+    }
+
+    default_encoding = "utf8"
+    default_inherit = None
+
+    def __init__(
+        self,
+        content,
+        name=None,
+        namespace=None,
+        stacklevel=None,
+        get_template=None,
+        default_inherit=None,
+        line_offset=0,
+        delimiters=None,
+    ):
+        self.content = content
+
+        # set delimiters
+        if delimiters is None:
+            delimiters = (
+                self.default_namespace["start_braces"],
+                self.default_namespace["end_braces"],
+            )
+        else:
+            # assert len(delimiters) == 2 and all([isinstance(delimiter, basestring)
+            #                                      for delimiter in delimiters])
+            self.default_namespace = self.__class__.default_namespace.copy()
+            self.default_namespace["start_braces"] = delimiters[0]
+            self.default_namespace["end_braces"] = delimiters[1]
+        self.delimiters = delimiters
+
+        self._unicode = isinstance(content, str)
+        if name is None and stacklevel is not None:
+            try:
+                caller = sys._getframe(stacklevel)
+            except ValueError:
+                pass
+            else:
+                globals = caller.f_globals
+                lineno = caller.f_lineno
+                if "__file__" in globals:
+                    name = globals["__file__"]
+                    if name.endswith(".pyc") or name.endswith(".pyo"):
+                        name = name[:-1]
+                elif "__name__" in globals:
+                    name = globals["__name__"]
+                else:
+                    name = "<string>"
+                if lineno:
+                    name += ":%s" % lineno
+        self.name = name
+        self._parsed = parse(
+            content, name=name, line_offset=line_offset, delimiters=self.delimiters
+        )
+        if namespace is
None: + namespace = {} + self.namespace = namespace + self.get_template = get_template + if default_inherit is not None: + self.default_inherit = default_inherit + + def from_filename( + cls, + filename, + namespace=None, + encoding=None, + default_inherit=None, + get_template=get_file_template, + ): + with open(filename, "rb") as f: + c = f.read() + if encoding: + c = c.decode(encoding) + return cls( + content=c, + name=filename, + namespace=namespace, + default_inherit=default_inherit, + get_template=get_template, + ) + + from_filename = classmethod(from_filename) + + def __repr__(self): + return "<%s %s name=%r>" % ( + self.__class__.__name__, + hex(id(self))[2:], + self.name, + ) + + def substitute(self, *args, **kw): + if args: + if kw: + raise TypeError("You can only give positional *or* keyword arguments") + if len(args) > 1: + raise TypeError("You can only give one positional argument") + if not hasattr(args[0], "items"): + raise TypeError( + "If you pass in a single argument, you must pass in a " + "dictionary-like object (with a .items() method); you gave %r" + % (args[0],) + ) + kw = args[0] + ns = kw + ns["__template_name__"] = self.name + if self.namespace: + ns.update(self.namespace) + result, defs, inherit = self._interpret(ns) + if not inherit: + inherit = self.default_inherit + if inherit: + result = self._interpret_inherit(result, defs, inherit, ns) + return result + + def _interpret(self, ns): + __traceback_hide__ = True + parts = [] + defs = {} + self._interpret_codes(self._parsed, ns, out=parts, defs=defs) + if "__inherit__" in defs: + inherit = defs.pop("__inherit__") + else: + inherit = None + return "".join(parts), defs, inherit + + def _interpret_inherit(self, body, defs, inherit_template, ns): + __traceback_hide__ = True + if not self.get_template: + raise TemplateError( + "You cannot use inheritance without passing in get_template", + position=None, + name=self.name, + ) + templ = self.get_template(inherit_template, self) + self_ = TemplateObject(self.name) + for name, value in defs.items(): + setattr(self_, name, value) + self_.body = body + ns = ns.copy() + ns["self"] = self_ + return templ.substitute(ns) + + def _interpret_codes(self, codes, ns, out, defs): + __traceback_hide__ = True + for item in codes: + if isinstance(item, basestring_): + out.append(item) + else: + self._interpret_code(item, ns, out, defs) + + def _interpret_code(self, code, ns, out, defs): + __traceback_hide__ = True + name, pos = code[0], code[1] + if name == "py": + self._exec(code[2], ns, pos) + elif name == "continue": + raise _TemplateContinue() + elif name == "break": + raise _TemplateBreak() + elif name == "for": + vars, expr, content = code[2], code[3], code[4] + expr = self._eval(expr, ns, pos) + self._interpret_for(vars, expr, content, ns, out, defs) + elif name == "cond": + parts = code[2:] + self._interpret_if(parts, ns, out, defs) + elif name == "expr": + parts = code[2].split("|") + base = self._eval(parts[0], ns, pos) + for part in parts[1:]: + func = self._eval(part, ns, pos) + base = func(base) + out.append(self._repr(base, pos)) + elif name == "default": + var, expr = code[2], code[3] + if var not in ns: + result = self._eval(expr, ns, pos) + ns[var] = result + elif name == "inherit": + expr = code[2] + value = self._eval(expr, ns, pos) + defs["__inherit__"] = value + elif name == "def": + name = code[2] + signature = code[3] + parts = code[4] + ns[name] = defs[name] = TemplateDef( + self, name, signature, body=parts, ns=ns, pos=pos + ) + elif name == "comment": + return 
+ else: + assert 0, "Unknown code: %r" % name + + def _interpret_for(self, vars, expr, content, ns, out, defs): + __traceback_hide__ = True + for item in expr: + if len(vars) == 1: + ns[vars[0]] = item + else: + if len(vars) != len(item): + raise ValueError( + "Need %i items to unpack (got %i items)" + % (len(vars), len(item)) + ) + for name, value in zip(vars, item): + ns[name] = value + try: + self._interpret_codes(content, ns, out, defs) + except _TemplateContinue: + continue + except _TemplateBreak: + break + + def _interpret_if(self, parts, ns, out, defs): + __traceback_hide__ = True + # @@: if/else/else gets through + for part in parts: + assert not isinstance(part, basestring_) + name, pos = part[0], part[1] + if name == "else": + result = True + else: + result = self._eval(part[2], ns, pos) + if result: + self._interpret_codes(part[3], ns, out, defs) + break + + def _eval(self, code, ns, pos): + __traceback_hide__ = True + try: + try: + value = eval(code, self.default_namespace, ns) + except SyntaxError as e: + raise SyntaxError("invalid syntax in expression: %s" % code) + return value + except Exception as e: + if getattr(e, "args", None): + arg0 = e.args[0] + else: + arg0 = coerce_text(e) + e.args = (self._add_line_info(arg0, pos),) + raise + + def _exec(self, code, ns, pos): + __traceback_hide__ = True + try: + exec(code, self.default_namespace, ns) + except Exception as e: + if e.args: + e.args = (self._add_line_info(e.args[0], pos),) + else: + e.args = (self._add_line_info(None, pos),) + raise + + def _repr(self, value, pos): + __traceback_hide__ = True + try: + if value is None: + return "" + if self._unicode: + try: + value = str(value) + except UnicodeDecodeError: + value = bytes(value) + else: + if not isinstance(value, basestring_): + value = coerce_text(value) + if isinstance(value, str) and self.default_encoding: + value = value.encode(self.default_encoding) + except Exception as e: + e.args = (self._add_line_info(e.args[0], pos),) + raise + else: + if self._unicode and isinstance(value, bytes): + if not self.default_encoding: + raise UnicodeDecodeError( + "Cannot decode bytes value %r into unicode " + "(no default_encoding provided)" % value + ) + try: + value = value.decode(self.default_encoding) + except UnicodeDecodeError as e: + raise UnicodeDecodeError( + e.encoding, + e.object, + e.start, + e.end, + e.reason + " in string %r" % value, + ) + elif not self._unicode and isinstance(value, str): + if not self.default_encoding: + raise UnicodeEncodeError( + "Cannot encode unicode value %r into bytes " + "(no default_encoding provided)" % value + ) + value = value.encode(self.default_encoding) + return value + + def _add_line_info(self, msg, pos): + msg = "%s at line %s column %s" % (msg, pos[0], pos[1]) + if self.name: + msg += " in file %s" % self.name + return msg + + +def sub(content, delimiters=None, **kw): + name = kw.get("__name") + tmpl = Template(content, name=name, delimiters=delimiters) + return tmpl.substitute(kw) + + +def paste_script_template_renderer(content, vars, filename=None): + tmpl = Template(content, name=filename) + return tmpl.substitute(vars) + + +class bunch(dict): + def __init__(self, **kw): + for name, value in kw.items(): + setattr(self, name, value) + + def __setattr__(self, name, value): + self[name] = value + + def __getattr__(self, name): + try: + return self[name] + except KeyError: + raise AttributeError(name) + + def __getitem__(self, key): + if "default" in self: + try: + return dict.__getitem__(self, key) + except KeyError: + return 
dict.__getitem__(self, "default") + else: + return dict.__getitem__(self, key) + + def __repr__(self): + return "<%s %s>" % ( + self.__class__.__name__, + " ".join(["%s=%r" % (k, v) for k, v in sorted(self.items())]), + ) + + +class TemplateDef: + def __init__( + self, template, func_name, func_signature, body, ns, pos, bound_self=None + ): + self._template = template + self._func_name = func_name + self._func_signature = func_signature + self._body = body + self._ns = ns + self._pos = pos + self._bound_self = bound_self + + def __repr__(self): + return "" % ( + self._func_name, + self._func_signature, + self._template.name, + self._pos, + ) + + def __str__(self): + return self() + + def __call__(self, *args, **kw): + values = self._parse_signature(args, kw) + ns = self._ns.copy() + ns.update(values) + if self._bound_self is not None: + ns["self"] = self._bound_self + out = [] + subdefs = {} + self._template._interpret_codes(self._body, ns, out, subdefs) + return "".join(out) + + def __get__(self, obj, type=None): + if obj is None: + return self + return self.__class__( + self._template, + self._func_name, + self._func_signature, + self._body, + self._ns, + self._pos, + bound_self=obj, + ) + + def _parse_signature(self, args, kw): + values = {} + sig_args, var_args, var_kw, defaults = self._func_signature + extra_kw = {} + for name, value in kw.items(): + if not var_kw and name not in sig_args: + raise TypeError("Unexpected argument %s" % name) + if name in sig_args: + values[sig_args] = value + else: + extra_kw[name] = value + args = list(args) + sig_args = list(sig_args) + while args: + while sig_args and sig_args[0] in values: + sig_args.pop(0) + if sig_args: + name = sig_args.pop(0) + values[name] = args.pop(0) + elif var_args: + values[var_args] = tuple(args) + break + else: + raise TypeError( + "Extra position arguments: %s" % ", ".join([repr(v) for v in args]) + ) + for name, value_expr in defaults.items(): + if name not in values: + values[name] = self._template._eval(value_expr, self._ns, self._pos) + for name in sig_args: + if name not in values: + raise TypeError("Missing argument: %s" % name) + if var_kw: + values[var_kw] = extra_kw + return values + + +class TemplateObject: + def __init__(self, name): + self.__name = name + self.get = TemplateObjectGetter(self) + + def __repr__(self): + return "<%s %s>" % (self.__class__.__name__, self.__name) + + +class TemplateObjectGetter: + def __init__(self, template_obj): + self.__template_obj = template_obj + + def __getattr__(self, attr): + return getattr(self.__template_obj, attr, Empty) + + def __repr__(self): + return "<%s around %r>" % (self.__class__.__name__, self.__template_obj) + + +class _Empty: + def __call__(self, *args, **kw): + return self + + def __str__(self): + return "" + + def __repr__(self): + return "Empty" + + def __unicode__(self): + return "" + + def __iter__(self): + return iter(()) + + def __bool__(self): + return False + + +Empty = _Empty() +del _Empty + +############################################################ +## Lexing and Parsing +############################################################ + + +def lex(s, name=None, trim_whitespace=True, line_offset=0, delimiters=None): + """ + Lex a string into chunks: + + >>> lex('hey') + ['hey'] + >>> lex('hey {{you}}') + ['hey ', ('you', (1, 7))] + >>> lex('hey {{') + Traceback (most recent call last): + ... + TemplateError: No }} to finish last expression at line 1 column 7 + >>> lex('hey }}') + Traceback (most recent call last): + ... 
+ TemplateError: }} outside expression at line 1 column 7 + >>> lex('hey {{ {{') + Traceback (most recent call last): + ... + TemplateError: {{ inside expression at line 1 column 10 + + """ + if delimiters is None: + delimiters = ( + Template.default_namespace["start_braces"], + Template.default_namespace["end_braces"], + ) + in_expr = False + chunks = [] + last = 0 + last_pos = (line_offset + 1, 1) + + token_re = re.compile( + r"%s|%s" % (re.escape(delimiters[0]), re.escape(delimiters[1])) + ) + for match in token_re.finditer(s): + expr = match.group(0) + pos = find_position(s, match.end(), last, last_pos) + if expr == delimiters[0] and in_expr: + raise TemplateError( + "%s inside expression" % delimiters[0], position=pos, name=name + ) + elif expr == delimiters[1] and not in_expr: + raise TemplateError( + "%s outside expression" % delimiters[1], position=pos, name=name + ) + if expr == delimiters[0]: + part = s[last:match.start()] + if part: + chunks.append(part) + in_expr = True + else: + chunks.append((s[last: match.start()], last_pos)) + in_expr = False + last = match.end() + last_pos = pos + if in_expr: + raise TemplateError( + "No %s to finish last expression" % delimiters[1], + name=name, + position=last_pos, + ) + part = s[last:] + if part: + chunks.append(part) + if trim_whitespace: + chunks = trim_lex(chunks) + return chunks + + +statement_re = re.compile(r"^(?:if |elif |for |def |inherit |default |py:)") +single_statements = ["else", "endif", "endfor", "enddef", "continue", "break"] +trail_whitespace_re = re.compile(r"\n\r?[\t ]*$") +lead_whitespace_re = re.compile(r"^[\t ]*\n") + + +def trim_lex(tokens): + r""" + Takes a lexed set of tokens, and removes whitespace when there is + a directive on a line by itself: + + >>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False) + >>> tokens + [('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny'] + >>> trim_lex(tokens) + [('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y'] + """ + last_trim = None + for i, current in enumerate(tokens): + if isinstance(current, basestring_): + # we don't trim this + continue + item = current[0] + if not statement_re.search(item) and item not in single_statements: + continue + if not i: + prev = "" + else: + prev = tokens[i - 1] + if i + 1 >= len(tokens): + next_chunk = "" + else: + next_chunk = tokens[i + 1] + if not isinstance(next_chunk, basestring_) or not isinstance(prev, basestring_): + continue + prev_ok = not prev or trail_whitespace_re.search(prev) + if i == 1 and not prev.strip(): + prev_ok = True + if last_trim is not None and last_trim + 2 == i and not prev.strip(): + prev_ok = "last" + if prev_ok and ( + not next_chunk + or lead_whitespace_re.search(next_chunk) + or (i == len(tokens) - 2 and not next_chunk.strip()) + ): + if prev: + if (i == 1 and not prev.strip()) or prev_ok == "last": + tokens[i - 1] = "" + else: + m = trail_whitespace_re.search(prev) + # +1 to leave the leading \n on: + prev = prev[: m.start() + 1] + tokens[i - 1] = prev + if next_chunk: + last_trim = i + if i == len(tokens) - 2 and not next_chunk.strip(): + tokens[i + 1] = "" + else: + m = lead_whitespace_re.search(next_chunk) + next_chunk = next_chunk[m.end():] + tokens[i + 1] = next_chunk + return tokens + + +def find_position(string, index, last_index, last_pos): + """Given a string and index, return (line, column)""" + lines = string.count("\n", last_index, index) + if lines > 0: + column = index - string.rfind("\n", last_index, index) + else: + column = last_pos[1] + (index - last_index) + return 
(last_pos[0] + lines, column) + + +def parse(s, name=None, line_offset=0, delimiters=None): + r""" + Parses a string into a kind of AST + + >>> parse('{{x}}') + [('expr', (1, 3), 'x')] + >>> parse('foo') + ['foo'] + >>> parse('{{if x}}test{{endif}}') + [('cond', (1, 3), ('if', (1, 3), 'x', ['test']))] + >>> parse('series->{{for x in y}}x={{x}}{{endfor}}') + ['series->', ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])] + >>> parse('{{for x, y in z:}}{{continue}}{{endfor}}') + [('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])] + >>> parse('{{py:x=1}}') + [('py', (1, 3), 'x=1')] + >>> parse('{{if x}}a{{elif y}}b{{else}}c{{endif}}') + [('cond', (1, 3), ('if', (1, 3), 'x', ['a']), ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))] # noqa: E501 + + Some exceptions:: + + >>> parse('{{continue}}') + Traceback (most recent call last): + ... + TemplateError: continue outside of for loop at line 1 column 3 + >>> parse('{{if x}}foo') + Traceback (most recent call last): + ... + TemplateError: No {{endif}} at line 1 column 3 + >>> parse('{{else}}') + Traceback (most recent call last): + ... + TemplateError: else outside of an if block at line 1 column 3 + >>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}') + Traceback (most recent call last): + ... + TemplateError: Unexpected endif at line 1 column 25 + >>> parse('{{if}}{{endif}}') + Traceback (most recent call last): + ... + TemplateError: if with no expression at line 1 column 3 + >>> parse('{{for x y}}{{endfor}}') + Traceback (most recent call last): + ... + TemplateError: Bad for (no "in") in 'x y' at line 1 column 3 + >>> parse('{{py:x=1\ny=2}}') + Traceback (most recent call last): + ... + TemplateError: Multi-line py blocks must start with a newline at line 1 column 3 + """ + if delimiters is None: + delimiters = ( + Template.default_namespace["start_braces"], + Template.default_namespace["end_braces"], + ) + tokens = lex(s, name=name, line_offset=line_offset, delimiters=delimiters) + result = [] + while tokens: + next_chunk, tokens = parse_expr(tokens, name) + result.append(next_chunk) + return result + + +def parse_expr(tokens, name, context=()): + if isinstance(tokens[0], basestring_): + return tokens[0], tokens[1:] + expr, pos = tokens[0] + expr = expr.strip() + if expr.startswith("py:"): + expr = expr[3:].lstrip(" \t") + if expr.startswith("\n") or expr.startswith("\r"): + expr = expr.lstrip("\r\n") + if "\r" in expr: + expr = expr.replace("\r\n", "\n") + expr = expr.replace("\r", "") + expr += "\n" + else: + if "\n" in expr: + raise TemplateError( + "Multi-line py blocks must start with a newline", + position=pos, + name=name, + ) + return ("py", pos, expr), tokens[1:] + elif expr in ("continue", "break"): + if "for" not in context: + raise TemplateError("continue outside of for loop", position=pos, name=name) + return (expr, pos), tokens[1:] + elif expr.startswith("if "): + return parse_cond(tokens, name, context) + elif expr.startswith("elif ") or expr == "else": + raise TemplateError( + "%s outside of an if block" % expr.split()[0], position=pos, name=name + ) + elif expr in ("if", "elif", "for"): + raise TemplateError("%s with no expression" % expr, position=pos, name=name) + elif expr in ("endif", "endfor", "enddef"): + raise TemplateError("Unexpected %s" % expr, position=pos, name=name) + elif expr.startswith("for "): + return parse_for(tokens, name, context) + elif expr.startswith("default "): + return parse_default(tokens, name, context) + elif expr.startswith("inherit "): + return 
parse_inherit(tokens, name, context) + elif expr.startswith("def "): + return parse_def(tokens, name, context) + elif expr.startswith("#"): + return ("comment", pos, tokens[0][0]), tokens[1:] + return ("expr", pos, tokens[0][0]), tokens[1:] + + +def parse_cond(tokens, name, context): + start = tokens[0][1] + pieces = [] + context = context + ("if",) + while 1: + if not tokens: + raise TemplateError("Missing {{endif}}", position=start, name=name) + if isinstance(tokens[0], tuple) and tokens[0][0] == "endif": + return ("cond", start) + tuple(pieces), tokens[1:] + next_chunk, tokens = parse_one_cond(tokens, name, context) + pieces.append(next_chunk) + + +def parse_one_cond(tokens, name, context): + (first, pos), tokens = tokens[0], tokens[1:] + content = [] + if first.endswith(":"): + first = first[:-1] + if first.startswith("if "): + part = ("if", pos, first[3:].lstrip(), content) + elif first.startswith("elif "): + part = ("elif", pos, first[5:].lstrip(), content) + elif first == "else": + part = ("else", pos, None, content) + else: + assert 0, "Unexpected token %r at %s" % (first, pos) + while 1: + if not tokens: + raise TemplateError("No {{endif}}", position=pos, name=name) + if isinstance(tokens[0], tuple) and ( + tokens[0][0] == "endif" + or tokens[0][0].startswith("elif ") + or tokens[0][0] == "else" + ): + return part, tokens + next_chunk, tokens = parse_expr(tokens, name, context) + content.append(next_chunk) + + +def parse_for(tokens, name, context): + first, pos = tokens[0] + tokens = tokens[1:] + context = ("for",) + context + content = [] + assert first.startswith("for "), first + if first.endswith(":"): + first = first[:-1] + first = first[3:].strip() + match = in_re.search(first) + if not match: + raise TemplateError('Bad for (no "in") in %r' % first, position=pos, name=name) + vars = first[: match.start()] + if "(" in vars: + raise TemplateError( + "You cannot have () in the variable section of a for loop (%r)" % vars, + position=pos, + name=name, + ) + vars = tuple([v.strip() for v in first[: match.start()].split(",") if v.strip()]) + expr = first[match.end():] + while 1: + if not tokens: + raise TemplateError("No {{endfor}}", position=pos, name=name) + if isinstance(tokens[0], tuple) and tokens[0][0] == "endfor": + return ("for", pos, vars, expr, content), tokens[1:] + next_chunk, tokens = parse_expr(tokens, name, context) + content.append(next_chunk) + + +def parse_default(tokens, name, context): + first, pos = tokens[0] + assert first.startswith("default ") + first = first.split(None, 1)[1] + parts = first.split("=", 1) + if len(parts) == 1: + raise TemplateError( + "Expression must be {{default var=value}}; no = found in %r" % first, + position=pos, + name=name, + ) + var = parts[0].strip() + if "," in var: + raise TemplateError( + "{{default x, y = ...}} is not supported", position=pos, name=name + ) + if not var_re.search(var): + raise TemplateError( + "Not a valid variable name for {{default}}: %r" % var, + position=pos, + name=name, + ) + expr = parts[1].strip() + return ("default", pos, var, expr), tokens[1:] + + +def parse_inherit(tokens, name, context): + first, pos = tokens[0] + assert first.startswith("inherit ") + expr = first.split(None, 1)[1] + return ("inherit", pos, expr), tokens[1:] + + +def parse_def(tokens, name, context): + first, start = tokens[0] + tokens = tokens[1:] + assert first.startswith("def ") + first = first.split(None, 1)[1] + if first.endswith(":"): + first = first[:-1] + if "(" not in first: + func_name = first + sig = ((), None, None, {}) 
+ elif not first.endswith(")"): + raise TemplateError( + "Function definition doesn't end with ): %s" % first, + position=start, + name=name, + ) + else: + first = first[:-1] + func_name, sig_text = first.split("(", 1) + sig = parse_signature(sig_text, name, start) + context = context + ("def",) + content = [] + while 1: + if not tokens: + raise TemplateError("Missing {{enddef}}", position=start, name=name) + if isinstance(tokens[0], tuple) and tokens[0][0] == "enddef": + return ("def", start, func_name, sig, content), tokens[1:] + next_chunk, tokens = parse_expr(tokens, name, context) + content.append(next_chunk) + + +def parse_signature(sig_text, name, pos): + tokens = tokenize.generate_tokens(StringIO(sig_text).readline) + sig_args = [] + var_arg = None + var_kw = None + defaults = {} + + def get_token(pos=False): + try: + tok_type, tok_string, (srow, scol), (erow, ecol), line = next(tokens) + except StopIteration: + return tokenize.ENDMARKER, "" + if pos: + return tok_type, tok_string, (srow, scol), (erow, ecol) + else: + return tok_type, tok_string + + while 1: + var_arg_type = None + tok_type, tok_string = get_token() + if tok_type == tokenize.ENDMARKER: + break + if tok_type == tokenize.OP and (tok_string == "*" or tok_string == "**"): + var_arg_type = tok_string + tok_type, tok_string = get_token() + if tok_type != tokenize.NAME: + raise TemplateError( + "Invalid signature: (%s)" % sig_text, position=pos, name=name + ) + var_name = tok_string + tok_type, tok_string = get_token() + if tok_type == tokenize.ENDMARKER or ( + tok_type == tokenize.OP and tok_string == "," + ): + if var_arg_type == "*": + var_arg = var_name + elif var_arg_type == "**": + var_kw = var_name + else: + sig_args.append(var_name) + if tok_type == tokenize.ENDMARKER: + break + continue + if var_arg_type is not None: + raise TemplateError( + "Invalid signature: (%s)" % sig_text, position=pos, name=name + ) + if tok_type == tokenize.OP and tok_string == "=": + nest_type = None + unnest_type = None + nest_count = 0 + start_pos = end_pos = None + parts = [] + while 1: + tok_type, tok_string, s, e = get_token(True) + if start_pos is None: + start_pos = s + end_pos = e + if tok_type == tokenize.ENDMARKER and nest_count: + raise TemplateError( + "Invalid signature: (%s)" % sig_text, position=pos, name=name + ) + if not nest_count and ( + tok_type == tokenize.ENDMARKER + or (tok_type == tokenize.OP and tok_string == ",") + ): + default_expr = isolate_expression(sig_text, start_pos, end_pos) + defaults[var_name] = default_expr + sig_args.append(var_name) + break + parts.append((tok_type, tok_string)) + if nest_count and tok_type == tokenize.OP and tok_string == nest_type: + nest_count += 1 + elif ( + nest_count and tok_type == tokenize.OP and tok_string == unnest_type + ): + nest_count -= 1 + if not nest_count: + nest_type = unnest_type = None + elif ( + not nest_count + and tok_type == tokenize.OP + and tok_string in ("(", "[", "{") + ): + nest_type = tok_string + nest_count = 1 + unnest_type = {"(": ")", "[": "]", "{": "}"}[nest_type] + return sig_args, var_arg, var_kw, defaults + + +def isolate_expression(string, start_pos, end_pos): + srow, scol = start_pos + srow -= 1 + erow, ecol = end_pos + erow -= 1 + lines = string.splitlines(True) + if srow == erow: + return lines[srow][scol:ecol] + parts = [lines[srow][scol:]] + parts.extend(lines[srow + 1:erow]) + if erow < len(lines): + # It'll sometimes give (end_row_past_finish, 0) + parts.append(lines[erow][:ecol]) + return "".join(parts) + + +_fill_command_usage = """\ 
+%prog [OPTIONS] TEMPLATE arg=value
+
+Use py:arg=value to set a Python value; otherwise all values are
+strings.
+"""
+
+
+def fill_command(args=None):
+    import sys
+    import optparse
+    import pkg_resources
+    import os
+
+    if args is None:
+        args = sys.argv[1:]
+    dist = pkg_resources.get_distribution("Paste")
+    parser = optparse.OptionParser(version=coerce_text(dist), usage=_fill_command_usage)
+    parser.add_option(
+        "-o",
+        "--output",
+        dest="output",
+        metavar="FILENAME",
+        help="File to write output to (default stdout)",
+    )
+    parser.add_option(
+        "--env",
+        dest="use_env",
+        action="store_true",
+        help="Put the environment in as top-level variables",
+    )
+    options, args = parser.parse_args(args)
+    if len(args) < 1:
+        print("You must give a template filename")
+        sys.exit(2)
+    template_name = args[0]
+    args = args[1:]
+    vars = {}
+    if options.use_env:
+        vars.update(os.environ)
+    for value in args:
+        if "=" not in value:
+            print("Bad argument: %r" % value)
+            sys.exit(2)
+        name, value = value.split("=", 1)
+        if name.startswith("py:"):
+            name = name[3:]
+            value = eval(value)
+        vars[name] = value
+    if template_name == "-":
+        template_content = sys.stdin.read()
+        template_name = "<stdin>"
+    else:
+        with open(template_name, "rb") as f:
+            template_content = f.read()
+    template = Template(template_content, name=template_name)
+    result = template.substitute(vars)
+    if options.output:
+        with open(options.output, "wb") as f:
+            f.write(result)
+    else:
+        sys.stdout.write(result)
+
+
+if __name__ == "__main__":
+    fill_command()
diff --git a/numpy/_configtool.pyi b/numpy/_configtool.pyi
new file mode 100644
index 000000000000..7e7363e797f3
--- /dev/null
+++ b/numpy/_configtool.pyi
@@ -0,0 +1 @@
+def main() -> None: ...
diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py
index 3a2bf40d0565..98a94973383a 100644
--- a/numpy/_core/_add_newdocs.py
+++ b/numpy/_core/_add_newdocs.py
@@ -10,7 +10,7 @@
 """
 from numpy._core.function_base import add_newdoc
-from numpy._core.overrides import array_function_like_doc
+from numpy._core.overrides import get_array_function_like_doc
 
 
 ###############################################################################
@@ -581,8 +581,6 @@
     Resolve all writeback semantics in writeable operands.
 
-    .. versionadded:: 1.15.0
-
     See Also
     --------
 
@@ -696,8 +694,6 @@
     """
    Number of dimensions of broadcasted result. Alias for `nd`.
 
-    .. versionadded:: 1.12.0
-
     Examples
     --------
     >>> import numpy as np
@@ -919,7 +915,7 @@
     >>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
     >>> x['a']
-    array([1, 3])
+    array([1, 3], dtype=int32)
 
     Creating an array from sub-classes:
 
@@ -931,10 +927,7 @@
     matrix([[1, 2],
             [3, 4]])
 
-    """.replace(
-        "${ARRAY_FUNCTION_LIKE}",
-        array_function_like_doc,
-    ))
+    """)
 
 add_newdoc('numpy._core.multiarray', 'asarray',
     """
@@ -1026,14 +1019,11 @@
     >>> np.asanyarray(a) is a
     True
 
-    """.replace(
-        "${ARRAY_FUNCTION_LIKE}",
-        array_function_like_doc,
-    ))
+    """)
 
 add_newdoc('numpy._core.multiarray', 'asanyarray',
     """
-    asanyarray(a, dtype=None, order=None, *, like=None)
+    asanyarray(a, dtype=None, order=None, *, device=None, copy=None, like=None)
 
     Convert the input to an ndarray, but pass ndarray subclasses through.
 
@@ -1105,10 +1095,7 @@
     >>> np.asanyarray(a) is a
     True
 
-    """.replace(
-        "${ARRAY_FUNCTION_LIKE}",
-        array_function_like_doc,
-    ))
+    """)
 
 add_newdoc('numpy._core.multiarray', 'ascontiguousarray',
     """
@@ -1171,10 +1158,7 @@
     Note: This function returns an array with at least one-dimension (1-d)
     so it will not preserve 0-d arrays.
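The note just above about not preserving 0-d arrays is easy to demonstrate (plain NumPy behavior, shown only for illustration):

    import numpy as np

    x = np.array(3.0)                 # 0-d array
    np.ascontiguousarray(x).shape     # (1,): promoted to one dimension
    np.asfortranarray(x).shape        # (1,)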
- """.replace( - "${ARRAY_FUNCTION_LIKE}", - array_function_like_doc, - )) + """) add_newdoc('numpy._core.multiarray', 'asfortranarray', """ @@ -1237,10 +1221,7 @@ Note: This function returns an array with at least one-dimension (1-d) so it will not preserve 0-d arrays. - """.replace( - "${ARRAY_FUNCTION_LIKE}", - array_function_like_doc, - )) + """) add_newdoc('numpy._core.multiarray', 'empty', """ @@ -1300,10 +1281,7 @@ array([[-1073741821, -1067949133], [ 496041986, 19249760]]) #uninitialized - """.replace( - "${ARRAY_FUNCTION_LIKE}", - array_function_like_doc, - )) + """) add_newdoc('numpy._core.multiarray', 'scalar', """ @@ -1374,10 +1352,7 @@ array([(0, 0), (0, 0)], dtype=[('x', '>> np.fromstring('1, 2', dtype=int, sep=',') array([1, 2]) - """.replace( - "${ARRAY_FUNCTION_LIKE}", - array_function_like_doc, - )) + """) add_newdoc('numpy._core.multiarray', 'compare_chararrays', """ @@ -1545,10 +1513,7 @@ [5, 6]]) - """.replace( - "${ARRAY_FUNCTION_LIKE}", - array_function_like_doc, - )) + """) add_newdoc('numpy._core.multiarray', 'fromfile', """ @@ -1564,19 +1529,11 @@ ---------- file : file or str or Path Open file object or filename. - - .. versionchanged:: 1.17.0 - `pathlib.Path` objects are now accepted. - dtype : data-type Data type of the returned array. For binary files, it is used to determine the size and byte-order of the items in the file. Most builtin numeric types are supported and extension types may be supported. - - .. versionadded:: 1.18.0 - Complex dtypes. - count : int Number of items to read. ``-1`` means all items (i.e., the complete file). @@ -1589,8 +1546,6 @@ offset : int The offset (in bytes) from the file's current position. Defaults to 0. Only permitted for binary files. - - .. versionadded:: 1.17.0 ${ARRAY_FUNCTION_LIKE} .. versionadded:: 1.20.0 @@ -1641,10 +1596,7 @@ array([((10, 0), 98.25)], dtype=[('time', [('min', '>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3) array([1, 2, 3], dtype=uint8) - """.replace( - "${ARRAY_FUNCTION_LIKE}", - array_function_like_doc, - )) + """) add_newdoc('numpy._core.multiarray', 'from_dlpack', """ @@ -1873,10 +1822,7 @@ >>> np.arange(3,7,2) array([3, 5]) - """.replace( - "${ARRAY_FUNCTION_LIKE}", - array_function_like_doc, - )) + """) add_newdoc('numpy._core.multiarray', '_get_ndarray_c_version', """_get_ndarray_c_version() @@ -1919,8 +1865,6 @@ ----- Please see `numpy.result_type` for additional information about promotion. - .. versionadded:: 1.6.0 - Starting in NumPy 1.9, promote_types function now returns a valid string length when given an integer or float dtype as one argument and a string dtype as another argument. Previously it always returned the input string @@ -2034,8 +1978,6 @@ Notes ----- - .. versionadded:: 1.6.0 - The Einstein summation convention can be used to compute many multi-dimensional, linear algebraic array operations. `einsum` provides a succinct way of representing these. @@ -2107,8 +2049,6 @@ The examples below have corresponding `einsum` calls with the two parameter methods. - .. versionadded:: 1.10.0 - Views returned from einsum are now writeable whenever the input array is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now have the same effect as :py:func:`np.swapaxes(a, 0, 2) ` @@ -3219,8 +3159,6 @@ Refer to `numpy.argpartition` for full documentation. - .. 
versionadded:: 1.8.0
-
     See Also
     --------
     numpy.argpartition : equivalent function
@@ -3272,18 +3210,6 @@
     is a new array of the same shape as the input array, with dtype, order
     given by `dtype`, `order`.

-    Notes
-    -----
-    .. versionchanged:: 1.17.0
-       Casting between a simple data type and a structured one is possible only
-       for "unsafe" casting.  Casting to multiple fields is allowed, but
-       casting from multiple fields is not.
-
-    .. versionchanged:: 1.9.0
-       Casting from numeric to string types in 'safe' casting mode requires
-       that the string dtype length is long enough to store the max
-       integer/float value converted.
-
     Raises
     ------
     ComplexWarning
@@ -3345,12 +3271,12 @@
     ``A.view(A.dtype.newbyteorder()).byteswap()`` produces an array with
     the same values but different representation in memory

-    >>> A = np.array([1, 2, 3])
+    >>> A = np.array([1, 2, 3], dtype=np.int64)
     >>> A.view(np.uint8)
     array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0,
            0, 0, 0], dtype=uint8)
     >>> A.view(A.dtype.newbyteorder()).byteswap(inplace=True)
-    array([1, 2, 3])
+    array([1, 2, 3], dtype='>i8')
     >>> A.view(np.uint8)
     array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0,
            0, 0, 3], dtype=uint8)
@@ -3569,9 +3495,6 @@
     file : str or Path
         A string naming the dump file.

-        .. versionchanged:: 1.17.0
-            `pathlib.Path` objects are now accepted.
-
     """))


@@ -4195,10 +4118,6 @@
         and 'mergesort' use timsort under the covers and, in general, the
         actual implementation will vary with datatype. The 'mergesort' option
         is retained for backwards compatibility.
-
-        .. versionchanged:: 1.15.0
-           The 'stable' option was added.
-
     order : str or list of str, optional
         When `a` is an array with fields defined, this argument specifies
         which fields to compare first, second, etc.  A single field can
@@ -4254,8 +4173,6 @@
     located to its right. The ordering of the elements in the two partitions
     on either side of the k-th element in the output array is undefined.

-    .. versionadded:: 1.8.0
-
     Parameters
     ----------
     kth : int or sequence of ints
@@ -4393,10 +4310,6 @@
     ----------
     fid : file or str or Path
         An open file object, or a string containing a filename.
-
-        .. versionchanged:: 1.17.0
-            `pathlib.Path` objects are now accepted.
-
     sep : str
         Separator between array items for text output.
         If "" (empty), a binary file is written, equivalent to
@@ -4459,7 +4372,7 @@
     >>> a = np.uint32([1, 2])
     >>> a_list = list(a)
     >>> a_list
-    [1, 2]
+    [np.uint32(1), np.uint32(2)]
     >>> type(a_list[0])
     <class 'numpy.uint32'>
     >>> a_tolist = a.tolist()
@@ -4497,8 +4410,6 @@
     data memory. The bytes object is produced in C-order by default.
     This behavior is controlled by the ``order`` parameter.

-    .. versionadded:: 1.9.0
-
     Parameters
     ----------
     order : {'C', 'F', 'A'}, optional
@@ -5258,8 +5169,6 @@
         dimension of the input array. `axis` may be negative, in
         which case it counts from the last to the first axis.

-        .. versionadded:: 1.7.0
-
         If this is None, a reduction is performed over all the axes.
         If this is a tuple of ints, a reduction is performed on multiple
         axes, instead of a single axis or all the axes as before.
@@ -5278,32 +5187,22 @@
         a freshly-allocated array is returned. For consistency with
         ``ufunc.__call__``, if given as a keyword, this may be wrapped in a
         1-element tuple.
-
-        .. versionchanged:: 1.13.0
-           Tuples are allowed for keyword argument.
     keepdims : bool, optional
         If this is set to True, the axes which are reduced are left
         in the result as dimensions with size one. With this option,
         the result will broadcast correctly against the original `array`.
-
-        ..
versionadded:: 1.7.0 initial : scalar, optional The value with which to start the reduction. If the ufunc has no identity or the dtype is object, this defaults to None - otherwise it defaults to ufunc.identity. If ``None`` is given, the first element of the reduction is used, and an error is thrown if the reduction is empty. - - .. versionadded:: 1.15.0 - where : array_like of bool, optional A boolean array which is broadcasted to match the dimensions of `array`, and selects elements to include in the reduction. Note that for ufuncs like ``minimum`` that do not have an identity defined, one has to pass in also ``initial``. - .. versionadded:: 1.17.0 - Returns ------- r : ndarray @@ -5397,9 +5296,6 @@ ``ufunc.__call__``, if given as a keyword, this may be wrapped in a 1-element tuple. - .. versionchanged:: 1.13.0 - Tuples are allowed for keyword argument. - Returns ------- r : ndarray @@ -5481,9 +5377,6 @@ ``ufunc.__call__``, if given as a keyword, this may be wrapped in a 1-element tuple. - .. versionchanged:: 1.13.0 - Tuples are allowed for keyword argument. - Returns ------- r : ndarray @@ -5632,8 +5525,6 @@ increment the first element once because of buffering, whereas ``add.at(a, [0,0], 1)`` will increment the first element twice. - .. versionadded:: 1.8.0 - Parameters ---------- a : array_like @@ -5741,7 +5632,7 @@ >>> np.add.resolve_dtypes((float32, float, None)) (dtype('float32'), dtype('float32'), dtype('float32')) - Where the Python ``float`` behaves samilar to a Python value ``0.0`` + Where the Python ``float`` behaves similar to a Python value ``0.0`` in a ufunc call. (See :ref:`NEP 50 ` for details.) """)) @@ -6325,8 +6216,6 @@ Number of dimensions of the sub-array if this data type describes a sub-array, and ``0`` otherwise. - .. versionadded:: 1.13.0 - Examples -------- >>> import numpy as np @@ -6577,8 +6466,6 @@ Once a busdaycalendar object is created, the weekmask and holidays cannot be modified. - .. versionadded:: 1.7.0 - Parameters ---------- weekmask : str or array_like of bool, optional @@ -6647,8 +6534,6 @@ Used internally by all axis-checking logic. - .. versionadded:: 1.13.0 - Parameters ---------- axis : int diff --git a/numpy/_core/_add_newdocs_scalars.py b/numpy/_core/_add_newdocs_scalars.py index d7f2853e94ca..52035e9fb4ae 100644 --- a/numpy/_core/_add_newdocs_scalars.py +++ b/numpy/_core/_add_newdocs_scalars.py @@ -292,12 +292,12 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): ``1970-01-01T00:00:00``. If created from string, the string can be in ISO 8601 date or datetime format. - + When parsing a string to create a datetime object, if the string contains - a trailing timezone (A 'Z' or a timezone offset), the timezone will be + a trailing timezone (A 'Z' or a timezone offset), the timezone will be dropped and a User Warning is given. - - Datetime64 objects should be considered to be UTC and therefore have an + + Datetime64 objects should be considered to be UTC and therefore have an offset of +0000. 
>>> np.datetime64(10, 'Y') diff --git a/numpy/_core/_asarray.py b/numpy/_core/_asarray.py index 2908813e7747..28ee8eaa8c58 100644 --- a/numpy/_core/_asarray.py +++ b/numpy/_core/_asarray.py @@ -5,7 +5,7 @@ """ from .overrides import ( array_function_dispatch, - set_array_function_like_doc, + finalize_array_function_like, set_module, ) from .multiarray import array, asanyarray @@ -24,7 +24,7 @@ } -@set_array_function_like_doc +@finalize_array_function_like @set_module('numpy') def require(a, dtype=None, requirements=None, *, like=None): """ diff --git a/numpy/_core/_asarray.pyi b/numpy/_core/_asarray.pyi index 5cd49659480e..356d31b009e8 100644 --- a/numpy/_core/_asarray.pyi +++ b/numpy/_core/_asarray.pyi @@ -1,19 +1,19 @@ from collections.abc import Iterable -from typing import Any, TypeVar, overload, Literal +from typing import Any, TypeAlias, TypeVar, overload, Literal from numpy._typing import NDArray, DTypeLike, _SupportsArrayFunc _ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) -_Requirements = Literal[ +_Requirements: TypeAlias = Literal[ "C", "C_CONTIGUOUS", "CONTIGUOUS", "F", "F_CONTIGUOUS", "FORTRAN", "A", "ALIGNED", "W", "WRITEABLE", "O", "OWNDATA" ] -_E = Literal["E", "ENSUREARRAY"] -_RequirementsWithE = _Requirements | _E +_E: TypeAlias = Literal["E", "ENSUREARRAY"] +_RequirementsWithE: TypeAlias = _Requirements | _E @overload def require( diff --git a/numpy/_core/_machar.py b/numpy/_core/_machar.py index 2b1812f48f98..d6e2d1496f28 100644 --- a/numpy/_core/_machar.py +++ b/numpy/_core/_machar.py @@ -115,7 +115,7 @@ def __init__(self, float_conv=float,int_conv=int, """ # We ignore all errors here because we are purposely triggering - # underflow to detect the properties of the runninng arch. + # underflow to detect the properties of the running arch. with errstate(under='ignore'): self._do_init(float_conv, int_conv, float_to_float, float_to_str, title) diff --git a/numpy/_core/_methods.py b/numpy/_core/_methods.py index 388854e664a5..03c673fc0ff8 100644 --- a/numpy/_core/_methods.py +++ b/numpy/_core/_methods.py @@ -14,7 +14,6 @@ from numpy._core.multiarray import asanyarray from numpy._core import numerictypes as nt from numpy._core import _exceptions -from numpy._core._ufunc_config import _no_nep50_warning from numpy._globals import _NoValue # save those O(100) nanoseconds! 
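The datetime64 docstring above states that a trailing timezone designator is dropped with a warning and that values are treated as UTC. A minimal sketch of that behavior, not part of the diff (the docstring says a UserWarning is given; depending on the NumPy version the warning may surface as a DeprecationWarning instead):

import warnings
import numpy as np

print(np.datetime64("2025-01-01T12:00"))       # naive string parses cleanly
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    print(np.datetime64("2025-01-01T12:00Z"))  # trailing 'Z' is dropped
assert caught                                  # a warning was recorded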
@@ -135,9 +134,8 @@ def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): ret = umr_sum(arr, axis, dtype, out, keepdims, where=where) if isinstance(ret, mu.ndarray): - with _no_nep50_warning(): - ret = um.true_divide( - ret, rcount, out=ret, casting='unsafe', subok=False) + ret = um.true_divide( + ret, rcount, out=ret, casting='unsafe', subok=False) if is_float16_result and out is None: ret = arr.dtype.type(ret) elif hasattr(ret, 'dtype'): @@ -180,9 +178,8 @@ def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, # matching rcount to arrmean when where is specified as array div = rcount.reshape(arrmean.shape) if isinstance(arrmean, mu.ndarray): - with _no_nep50_warning(): - arrmean = um.true_divide(arrmean, div, out=arrmean, - casting='unsafe', subok=False) + arrmean = um.true_divide(arrmean, div, out=arrmean, + casting='unsafe', subok=False) elif hasattr(arrmean, "dtype"): arrmean = arrmean.dtype.type(arrmean / rcount) else: @@ -212,9 +209,8 @@ def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, # divide by degrees of freedom if isinstance(ret, mu.ndarray): - with _no_nep50_warning(): - ret = um.true_divide( - ret, rcount, out=ret, casting='unsafe', subok=False) + ret = um.true_divide( + ret, rcount, out=ret, casting='unsafe', subok=False) elif hasattr(ret, 'dtype'): ret = ret.dtype.type(ret / rcount) else: diff --git a/numpy/_core/_type_aliases.py b/numpy/_core/_type_aliases.py index 80a59e7b3f52..b8ea3851f0e5 100644 --- a/numpy/_core/_type_aliases.py +++ b/numpy/_core/_type_aliases.py @@ -63,7 +63,7 @@ # extra aliases are added only to `sctypeDict` # to support dtype name access, such as`np.dtype("float")` -_extra_aliases = { +_extra_aliases = { "float": "float64", "complex": "complex128", "object": "object_", @@ -104,8 +104,8 @@ # find proper group for each concrete type for type_group, abstract_type in [ - ("int", ma.signedinteger), ("uint", ma.unsignedinteger), - ("float", ma.floating), ("complex", ma.complexfloating), + ("int", ma.signedinteger), ("uint", ma.unsignedinteger), + ("float", ma.floating), ("complex", ma.complexfloating), ("others", ma.generic) ]: if issubclass(concrete_type, abstract_type): diff --git a/numpy/_core/_type_aliases.pyi b/numpy/_core/_type_aliases.pyi index 1adaa933239e..f92958a67d55 100644 --- a/numpy/_core/_type_aliases.pyi +++ b/numpy/_core/_type_aliases.pyi @@ -1,3 +1,96 @@ -from numpy import generic +from collections.abc import Collection +from typing import Any, Final, Literal as L, TypeAlias, TypedDict, type_check_only -sctypeDict: dict[int | str, type[generic]] +import numpy as np + +__all__ = ( + "_abstract_type_names", + "_aliases", + "_extra_aliases", + "allTypes", + "c_names_dict", + "sctypeDict", + "sctypes", +) + +sctypeDict: Final[dict[str, type[np.generic]]] +allTypes: Final[dict[str, type[np.generic]]] + +@type_check_only +class _CNamesDict(TypedDict): + BOOL: np.dtype[np.bool] + HALF: np.dtype[np.half] + FLOAT: np.dtype[np.single] + DOUBLE: np.dtype[np.double] + LONGDOUBLE: np.dtype[np.longdouble] + CFLOAT: np.dtype[np.csingle] + CDOUBLE: np.dtype[np.cdouble] + CLONGDOUBLE: np.dtype[np.clongdouble] + STRING: np.dtype[np.bytes_] + UNICODE: np.dtype[np.str_] + VOID: np.dtype[np.void] + OBJECT: np.dtype[np.object_] + DATETIME: np.dtype[np.datetime64] + TIMEDELTA: np.dtype[np.timedelta64] + BYTE: np.dtype[np.byte] + UBYTE: np.dtype[np.ubyte] + SHORT: np.dtype[np.short] + USHORT: np.dtype[np.ushort] + INT: np.dtype[np.intc] + UINT: np.dtype[np.uintc] + LONG: np.dtype[np.long] + ULONG: 
np.dtype[np.ulong] + LONGLONG: np.dtype[np.longlong] + ULONGLONG: np.dtype[np.ulonglong] + +c_names_dict: Final[_CNamesDict] + +_AbstractTypeName: TypeAlias = L[ + "generic", + "flexible", + "character", + "number", + "integer", + "inexact", + "unsignedinteger", + "signedinteger", + "floating", + "complexfloating", +] +_abstract_type_names: Final[set[_AbstractTypeName]] + +@type_check_only +class _AliasesType(TypedDict): + double: L["float64"] + cdouble: L["complex128"] + single: L["float32"] + csingle: L["complex64"] + half: L["float16"] + bool_: L["bool"] + int_: L["intp"] + uint: L["intp"] + +_aliases: Final[_AliasesType] + +@type_check_only +class _ExtraAliasesType(TypedDict): + float: L["float64"] + complex: L["complex128"] + object: L["object_"] + bytes: L["bytes_"] + a: L["bytes_"] + int: L["int_"] + str: L["str_"] + unicode: L["str_"] + +_extra_aliases: Final[_ExtraAliasesType] + +@type_check_only +class _SCTypes(TypedDict): + int: Collection[type[np.signedinteger[Any]]] + uint: Collection[type[np.unsignedinteger[Any]]] + float: Collection[type[np.floating[Any]]] + complex: Collection[type[np.complexfloating[Any, Any]]] + others: Collection[type[np.flexible | np.bool | np.object_]] + +sctypes: Final[_SCTypes] diff --git a/numpy/_core/_ufunc_config.py b/numpy/_core/_ufunc_config.py index d60e7cbbda97..4563f66cb52f 100644 --- a/numpy/_core/_ufunc_config.py +++ b/numpy/_core/_ufunc_config.py @@ -4,7 +4,6 @@ This provides helpers which wrap `_get_extobj_dict` and `_make_extobj`, and `_extobj_contextvar` from umath. """ -import collections.abc import contextlib import contextvars import functools @@ -14,7 +13,7 @@ __all__ = [ "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall", - "errstate", '_no_nep50_warning' + "errstate" ] @@ -77,7 +76,7 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None): >>> import numpy as np >>> orig_settings = np.seterr(all='ignore') # seterr to known value >>> np.int16(32000) * np.int16(3) - 30464 + np.int16(30464) >>> np.seterr(over='raise') {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'} >>> old_settings = np.seterr(all='warn', over='raise') @@ -90,7 +89,7 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None): >>> np.geterr() {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'} >>> np.int16(32000) * np.int16(3) - 30464 + np.int16(30464) >>> np.seterr(**orig_settings) # restore original {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'} @@ -482,22 +481,3 @@ def inner(*args, **kwargs): _extobj_contextvar.reset(_token) return inner - - -NO_NEP50_WARNING = contextvars.ContextVar("_no_nep50_warning", default=False) - -@set_module('numpy') -@contextlib.contextmanager -def _no_nep50_warning(): - """ - Context manager to disable NEP 50 warnings. This context manager is - only relevant if the NEP 50 warnings are enabled globally (which is not - thread/context safe). - - This warning context manager itself is fully safe, however. 
- """ - token = NO_NEP50_WARNING.set(True) - try: - yield - finally: - NO_NEP50_WARNING.reset(token) diff --git a/numpy/_core/_ufunc_config.pyi b/numpy/_core/_ufunc_config.pyi index f56504507ac0..78c9660323d1 100644 --- a/numpy/_core/_ufunc_config.pyi +++ b/numpy/_core/_ufunc_config.pyi @@ -1,17 +1,21 @@ +from _typeshed import SupportsWrite from collections.abc import Callable -from typing import Any, Literal, TypedDict +from typing import Any, Literal, TypeAlias, TypedDict, type_check_only -from numpy import _SupportsWrite +from numpy import errstate as errstate -_ErrKind = Literal["ignore", "warn", "raise", "call", "print", "log"] -_ErrFunc = Callable[[str, int], Any] +_ErrKind: TypeAlias = Literal["ignore", "warn", "raise", "call", "print", "log"] +_ErrFunc: TypeAlias = Callable[[str, int], Any] +_ErrCall: TypeAlias = _ErrFunc | SupportsWrite[str] +@type_check_only class _ErrDict(TypedDict): divide: _ErrKind over: _ErrKind under: _ErrKind invalid: _ErrKind +@type_check_only class _ErrDictOptional(TypedDict, total=False): all: None | _ErrKind divide: None | _ErrKind @@ -29,9 +33,7 @@ def seterr( def geterr() -> _ErrDict: ... def setbufsize(size: int) -> int: ... def getbufsize() -> int: ... -def seterrcall( - func: None | _ErrFunc | _SupportsWrite[str] -) -> None | _ErrFunc | _SupportsWrite[str]: ... -def geterrcall() -> None | _ErrFunc | _SupportsWrite[str]: ... +def seterrcall(func: _ErrCall | None) -> _ErrCall | None: ... +def geterrcall() -> _ErrCall | None: ... # See `numpy/__init__.pyi` for the `errstate` class and `no_nep5_warnings` diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index 4297e109ce8a..d95093a6a4e1 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -69,7 +69,13 @@ def _make_options_dict(precision=None, threshold=None, edgeitems=None, if sign not in [None, '-', '+', ' ']: raise ValueError("sign option must be one of ' ', '+', or '-'") - if legacy == False: + if legacy is False: + options['legacy'] = sys.maxsize + elif legacy == False: # noqa: E712 + warnings.warn( + f"Passing `legacy={legacy!r}` is deprecated.", + FutureWarning, stacklevel=3 + ) options['legacy'] = sys.maxsize elif legacy == '1.13': options['legacy'] = 113 @@ -77,12 +83,14 @@ def _make_options_dict(precision=None, threshold=None, edgeitems=None, options['legacy'] = 121 elif legacy == '1.25': options['legacy'] = 125 + elif legacy == '2.1': + options['legacy'] = 201 elif legacy is None: pass # OK, do nothing. else: warnings.warn( "legacy printing option can currently only be '1.13', '1.21', " - "'1.25', or `False`", stacklevel=3) + "'1.25', '2.1, or `False`", stacklevel=3) if threshold is not None: # forbid the bad threshold arg suggested by stack overflow, gh-12351 @@ -208,14 +216,16 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, that numeric scalars are printed without their type information, e.g. as ``3.0`` rather than ``np.float64(3.0)``. + If set to ``'2.1'``, shape information is not given when arrays are + summarized (i.e., multiple elements replaced with ``...``). + If set to `False`, disables legacy mode. Unrecognized strings will be ignored with a warning for forward compatibility. - .. versionadded:: 1.14.0 .. versionchanged:: 1.22.0 - .. versionchanged:: 2.0 + .. versionchanged:: 2.2 override_repr: callable, optional If set a passed function will be used for generating arrays' repr. 
@@ -244,7 +254,7 @@

     >>> np.set_printoptions(threshold=5)
     >>> np.arange(10)
-    array([0, 1, 2, ..., 7, 8, 9])
+    array([0, 1, 2, ..., 7, 8, 9], shape=(10,))

     Small results can be suppressed:

@@ -277,9 +287,18 @@

     >>> with np.printoptions(precision=2, suppress=True, threshold=5):
     ...     np.linspace(0, 10, 10)
-    array([ 0.  ,  1.11,  2.22, ...,  7.78,  8.89, 10.  ])
+    array([ 0.  ,  1.11,  2.22, ...,  7.78,  8.89, 10.  ], shape=(10,))

     """
+    _set_printoptions(precision, threshold, edgeitems, linewidth, suppress,
+                      nanstr, infstr, formatter, sign, floatmode,
+                      legacy=legacy, override_repr=override_repr)
+
+
+def _set_printoptions(precision=None, threshold=None, edgeitems=None,
+                      linewidth=None, suppress=None, nanstr=None,
+                      infstr=None, formatter=None, sign=None, floatmode=None,
+                      *, legacy=None, override_repr=None):
     new_opt = _make_options_dict(precision, threshold, edgeitems, linewidth,
                                  suppress, nanstr, infstr, sign, formatter,
                                  floatmode, legacy)
@@ -293,8 +312,7 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,
     if updated_opt['legacy'] == 113:
         updated_opt['sign'] = '-'

-    token = format_options.set(updated_opt)
-    return token
+    return format_options.set(updated_opt)


 @set_module('numpy')
@@ -378,8 +396,9 @@ def printoptions(*args, **kwargs):
     --------
     set_printoptions, get_printoptions

     """
-    token = set_printoptions(*args, **kwargs)
+    token = _set_printoptions(*args, **kwargs)
+
     try:
         yield get_printoptions()
     finally:
@@ -696,8 +715,6 @@ def array2string(a, max_line_width=None, precision=None,
         `False`, disables legacy mode.

         Unrecognized strings will be ignored with a warning for forward
         compatibility.

-        .. versionadded:: 1.14.0
-
     Returns
     -------
     array_str : str
@@ -1566,39 +1583,41 @@ def _array_repr_implementation(
     else:
         class_name = "array"

-    skipdtype = dtype_is_implied(arr.dtype) and arr.size > 0
     prefix = class_name + "("
-    suffix = ")" if skipdtype else ","
-
     if (current_options['legacy'] <= 113 and
             arr.shape == () and not arr.dtype.names):
         lst = repr(arr.item())
-    elif arr.size > 0 or arr.shape == (0,):
+    else:
         lst = array2string(arr, max_line_width, precision, suppress_small,
-                           ', ', prefix, suffix=suffix)
-    else:  # show zero-length shape unless it is (0,)
-        lst = "[], shape=%s" % (repr(arr.shape),)
-
-    arr_str = prefix + lst + suffix
-
-    if skipdtype:
-        return arr_str
-
-    dtype_str = "dtype={})".format(dtype_short_repr(arr.dtype))
-
-    # compute whether we should put dtype on a new line: Do so if adding the
-    # dtype would extend the last line past max_line_width.
+                           ', ', prefix, suffix=")")
+
+    # Add dtype and shape information if these cannot be inferred from
+    # the array string.
+    extras = []
+    if (arr.size == 0 and arr.shape != (0,)
+            or current_options['legacy'] > 210
+            and arr.size > current_options['threshold']):
+        extras.append(f"shape={arr.shape}")
+    if not dtype_is_implied(arr.dtype) or arr.size == 0:
+        extras.append(f"dtype={dtype_short_repr(arr.dtype)}")
+
+    if not extras:
+        return prefix + lst + ")"
+
+    arr_str = prefix + lst + ","
+    extra_str = ", ".join(extras) + ")"
+    # compute whether we should put extras on a new line: Do so if adding the
+    # extras would extend the last line past max_line_width.
     # Note: This line gives the correct result even when rfind returns -1.
last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1) spacer = " " if current_options['legacy'] <= 113: if issubclass(arr.dtype.type, flexible): - spacer = '\n' + ' '*len(class_name + "(") - elif last_line_len + len(dtype_str) + 1 > max_line_width: - spacer = '\n' + ' '*len(class_name + "(") + spacer = '\n' + ' '*len(prefix) + elif last_line_len + len(extra_str) + 1 > max_line_width: + spacer = '\n' + ' '*len(prefix) - return arr_str + spacer + dtype_str + return arr_str + spacer + extra_str def _array_repr_dispatcher( diff --git a/numpy/_core/arrayprint.pyi b/numpy/_core/arrayprint.pyi index 44d77083cd63..661d58a22fe3 100644 --- a/numpy/_core/arrayprint.pyi +++ b/numpy/_core/arrayprint.pyi @@ -1,43 +1,55 @@ from collections.abc import Callable -from typing import Any, Literal, TypedDict, SupportsIndex # Using a private class is by no means ideal, but it is simply a consequence # of a `contextlib.context` returning an instance of aforementioned class from contextlib import _GeneratorContextManager +from typing import Any, Final, Literal, SupportsIndex, TypeAlias, TypedDict, overload, type_check_only + +from typing_extensions import deprecated import numpy as np -from numpy import ( - integer, - timedelta64, - datetime64, - floating, - complexfloating, - void, - longdouble, - clongdouble, -) from numpy._typing import NDArray, _CharLike_co, _FloatLike_co -_FloatMode = Literal["fixed", "unique", "maxprec", "maxprec_equal"] +__all__ = [ + "array2string", + "array_repr", + "array_str", + "format_float_positional", + "format_float_scientific", + "get_printoptions", + "printoptions", + "set_printoptions", +] + +### +_FloatMode: TypeAlias = Literal["fixed", "unique", "maxprec", "maxprec_equal"] +_LegacyNoStyle: TypeAlias = Literal["1.21", "1.25", "2.1", False] +_Legacy: TypeAlias = Literal["1.13", _LegacyNoStyle] +_Sign: TypeAlias = Literal["-", "+", " "] +_Trim: TypeAlias = Literal["k", ".", "0", "-"] +_ReprFunc: TypeAlias = Callable[[NDArray[Any]], str] + +@type_check_only class _FormatDict(TypedDict, total=False): bool: Callable[[np.bool], str] - int: Callable[[integer[Any]], str] - timedelta: Callable[[timedelta64], str] - datetime: Callable[[datetime64], str] - float: Callable[[floating[Any]], str] - longfloat: Callable[[longdouble], str] - complexfloat: Callable[[complexfloating[Any, Any]], str] - longcomplexfloat: Callable[[clongdouble], str] - void: Callable[[void], str] + int: Callable[[np.integer], str] + timedelta: Callable[[np.timedelta64], str] + datetime: Callable[[np.datetime64], str] + float: Callable[[np.floating], str] + longfloat: Callable[[np.longdouble], str] + complexfloat: Callable[[np.complexfloating], str] + longcomplexfloat: Callable[[np.clongdouble], str] + void: Callable[[np.void], str] numpystr: Callable[[_CharLike_co], str] object: Callable[[object], str] all: Callable[[object], str] - int_kind: Callable[[integer[Any]], str] - float_kind: Callable[[floating[Any]], str] - complex_kind: Callable[[complexfloating[Any, Any]], str] + int_kind: Callable[[np.integer], str] + float_kind: Callable[[np.floating], str] + complex_kind: Callable[[np.complexfloating], str] str_kind: Callable[[_CharLike_co], str] +@type_check_only class _FormatOptions(TypedDict): precision: int threshold: int @@ -46,10 +58,14 @@ class _FormatOptions(TypedDict): suppress: bool nanstr: str infstr: str - formatter: None | _FormatDict - sign: Literal["-", "+", " "] + formatter: _FormatDict | None + sign: _Sign floatmode: _FloatMode - legacy: Literal[False, "1.13", "1.21"] + legacy: _Legacy + +### + 
+__docformat__: Final = "restructuredtext" # undocumented def set_printoptions( precision: None | SupportsIndex = ..., @@ -60,37 +76,112 @@ def set_printoptions( nanstr: None | str = ..., infstr: None | str = ..., formatter: None | _FormatDict = ..., - sign: Literal[None, "-", "+", " "] = ..., - floatmode: None | _FloatMode = ..., + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, *, - legacy: Literal[None, False, "1.13", "1.21"] = ..., - override_repr: None | Callable[[NDArray[Any]], str] = ..., + legacy: _Legacy | None = None, + override_repr: _ReprFunc | None = None, ) -> None: ... def get_printoptions() -> _FormatOptions: ... + +# public numpy export +@overload # no style def array2string( a: NDArray[Any], - max_line_width: None | int = ..., - precision: None | SupportsIndex = ..., - suppress_small: None | bool = ..., - separator: str = ..., - prefix: str = ..., - # NOTE: With the `style` argument being deprecated, - # all arguments between `formatter` and `suffix` are de facto - # keyworld-only arguments + max_line_width: int | None = None, + precision: SupportsIndex | None = None, + suppress_small: bool | None = None, + separator: str = " ", + prefix: str = "", *, - formatter: None | _FormatDict = ..., - threshold: None | int = ..., - edgeitems: None | int = ..., - sign: Literal[None, "-", "+", " "] = ..., - floatmode: None | _FloatMode = ..., - suffix: str = ..., - legacy: Literal[None, False, "1.13", "1.21"] = ..., + formatter: _FormatDict | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + suffix: str = "", + legacy: _Legacy | None = None, ) -> str: ... +@overload # style= (positional), legacy="1.13" +def array2string( + a: NDArray[Any], + max_line_width: int | None, + precision: SupportsIndex | None, + suppress_small: bool | None, + separator: str, + prefix: str, + style: _ReprFunc, + formatter: _FormatDict | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + suffix: str = "", + *, + legacy: Literal["1.13"], +) -> str: ... +@overload # style= (keyword), legacy="1.13" +def array2string( + a: NDArray[Any], + max_line_width: int | None = None, + precision: SupportsIndex | None = None, + suppress_small: bool | None = None, + separator: str = " ", + prefix: str = "", + *, + style: _ReprFunc, + formatter: _FormatDict | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + suffix: str = "", + legacy: Literal["1.13"], +) -> str: ... +@overload # style= (positional), legacy!="1.13" +@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode") +def array2string( + a: NDArray[Any], + max_line_width: int | None, + precision: SupportsIndex | None, + suppress_small: bool | None, + separator: str, + prefix: str, + style: _ReprFunc, + formatter: _FormatDict | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + suffix: str = "", + *, + legacy: _LegacyNoStyle | None = None, +) -> str: ... 
+@overload # style= (keyword), legacy="1.13" +@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode") +def array2string( + a: NDArray[Any], + max_line_width: int | None = None, + precision: SupportsIndex | None = None, + suppress_small: bool | None = None, + separator: str = " ", + prefix: str = "", + *, + style: _ReprFunc, + formatter: _FormatDict | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + suffix: str = "", + legacy: _LegacyNoStyle | None = None, +) -> str: ... + def format_float_scientific( x: _FloatLike_co, precision: None | int = ..., unique: bool = ..., - trim: Literal["k", ".", "0", "-"] = ..., + trim: _Trim = "k", sign: bool = ..., pad_left: None | int = ..., exp_digits: None | int = ..., @@ -101,7 +192,7 @@ def format_float_positional( precision: None | int = ..., unique: bool = ..., fractional: bool = ..., - trim: Literal["k", ".", "0", "-"] = ..., + trim: _Trim = "k", sign: bool = ..., pad_left: None | int = ..., pad_right: None | int = ..., @@ -128,8 +219,9 @@ def printoptions( nanstr: None | str = ..., infstr: None | str = ..., formatter: None | _FormatDict = ..., - sign: Literal[None, "-", "+", " "] = ..., - floatmode: None | _FloatMode = ..., + sign: None | _Sign = None, + floatmode: _FloatMode | None = None, *, - legacy: Literal[None, False, "1.13", "1.21"] = ... + legacy: _Legacy | None = None, + override_repr: _ReprFunc | None = None, ) -> _GeneratorContextManager[_FormatOptions]: ... diff --git a/numpy/_core/code_generators/cversions.txt b/numpy/_core/code_generators/cversions.txt index 4ce44ada45bf..abc5b969c6c7 100644 --- a/numpy/_core/code_generators/cversions.txt +++ b/numpy/_core/code_generators/cversions.txt @@ -76,4 +76,5 @@ # Version 18 (NumPy 2.0.0) 0x00000012 = 2b8f1f4da822491ff030b2b37dff07e3 # Version 19 (NumPy 2.1.0) Only header additions +# Version 19 (NumPy 2.2.0) No change 0x00000013 = 2b8f1f4da822491ff030b2b37dff07e3 diff --git a/numpy/_core/code_generators/genapi.py b/numpy/_core/code_generators/genapi.py index da2f8f636e59..3eb03b208ab6 100644 --- a/numpy/_core/code_generators/genapi.py +++ b/numpy/_core/code_generators/genapi.py @@ -85,7 +85,7 @@ def get_processor(): join('multiarray', 'stringdtype', 'static_string.c'), join('multiarray', 'strfuncs.c'), join('multiarray', 'usertypes.c'), - join('umath', 'dispatching.c'), + join('umath', 'dispatching.cpp'), join('umath', 'extobj.c'), join('umath', 'loops.c.src'), join('umath', 'reduction.c'), diff --git a/numpy/_core/code_generators/generate_umath.py b/numpy/_core/code_generators/generate_umath.py index 64d6a19a871d..c810de1aec5f 100644 --- a/numpy/_core/code_generators/generate_umath.py +++ b/numpy/_core/code_generators/generate_umath.py @@ -5,8 +5,6 @@ """ import os import re -import struct -import sys import textwrap import argparse @@ -1154,6 +1152,22 @@ def english_upper(s): TD(O), signature='(n),(n)->()', ), +'matvec': + Ufunc(2, 1, None, + docstrings.get('numpy._core.umath.matvec'), + "PyUFunc_SimpleUniformOperationTypeResolver", + TD(notimes_or_obj), + TD(O), + signature='(m,n),(n)->(m)', + ), +'vecmat': + Ufunc(2, 1, None, + docstrings.get('numpy._core.umath.vecmat'), + "PyUFunc_SimpleUniformOperationTypeResolver", + TD(notimes_or_obj), + TD(O), + signature='(n),(n,m)->(m)', + ), 'str_len': Ufunc(1, 1, Zero, docstrings.get('numpy._core.umath.str_len'), @@ -1410,7 +1424,7 @@ def make_arrays(funcdict): ) from None astype = '' - if not t.astype is 
None: + if t.astype is not None: astype = '_As_%s' % thedict[t.astype] astr = ('%s_functions[%d] = PyUFunc_%s%s;' % (name, k, thedict[t.type], astype)) @@ -1578,13 +1592,10 @@ def make_code(funcdict, filename): #include "matmul.h" #include "clip.h" #include "dtypemeta.h" + #include "dispatching.h" #include "_umath_doc_generated.h" %s - /* Returns a borrowed ref of the second value in the matching info tuple */ - PyObject * - get_info_no_cast(PyUFuncObject *ufunc, PyArray_DTypeMeta *op_dtype, - int ndtypes); static int InitOperators(PyObject *dictionary) { diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index 2e4d694065fb..c9ef4b8d533b 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -44,7 +44,7 @@ def add_newdoc(place, name, doc): skip = ( # gufuncs do not use the OUT_SCALAR replacement strings - 'matmul', 'vecdot', + 'matmul', 'vecdot', 'matvec', 'vecmat', # clip has 3 inputs, which is not handled by this 'clip', ) @@ -59,7 +59,7 @@ def add_newdoc(place, name, doc): for k, v in subst.items(): doc = doc.replace('$' + k, v) - docdict['.'.join((place, name))] = doc + docdict[f'{place}.{name}'] = doc add_newdoc('numpy._core.umath', 'absolute', @@ -703,7 +703,7 @@ def add_newdoc(place, name, doc): array([ 6, 5, 255]) >>> np.bitwise_or(np.array([2, 5, 255, 2147483647], dtype=np.int32), ... np.array([4, 4, 4, 2147483647], dtype=np.int32)) - array([ 6, 5, 255, 2147483647]) + array([ 6, 5, 255, 2147483647], dtype=int32) >>> np.bitwise_or([True, True], [False, True]) array([ True, True]) @@ -795,7 +795,7 @@ def add_newdoc(place, name, doc): Returns ------- y : ndarray or scalar - The ceiling of each element in `x`, with `float` dtype. + The ceiling of each element in `x`. $OUT_SCALAR_1 See Also @@ -836,10 +836,6 @@ def add_newdoc(place, name, doc): -------- ceil, floor, rint, fix - Notes - ----- - .. versionadded:: 1.3.0 - Examples -------- >>> import numpy as np @@ -1029,8 +1025,6 @@ def add_newdoc(place, name, doc): Notes ----- - .. versionadded:: 1.3.0 - rad2deg(x) is ``180 * x / pi``. Examples @@ -1068,10 +1062,6 @@ def add_newdoc(place, name, doc): The output array, element-wise Heaviside step function of `x1`. $OUT_SCALAR_2 - Notes - ----- - .. versionadded:: 1.13.0 - References ---------- .. [1] Wikipedia, "Heaviside step function", @@ -1272,12 +1262,6 @@ def add_newdoc(place, name, doc): -------- power - Notes - ----- - .. versionadded:: 1.3.0 - - - Examples -------- >>> import numpy as np @@ -1679,7 +1663,7 @@ def add_newdoc(place, name, doc): >>> x = np.invert(np.array(13, dtype=np.uint8)) >>> x - 242 + np.uint8(242) >>> np.binary_repr(x, width=8) '11110010' @@ -1687,7 +1671,7 @@ def add_newdoc(place, name, doc): >>> x = np.invert(np.array(13, dtype=np.uint16)) >>> x - 65522 + np.uint16(65522) >>> np.binary_repr(x, width=16) '1111111111110010' @@ -1867,8 +1851,6 @@ def add_newdoc(place, name, doc): """ Test element-wise for NaT (not a time) and return result as a boolean array. - .. versionadded:: 1.13.0 - Parameters ---------- x : array_like @@ -2170,8 +2152,6 @@ def add_newdoc(place, name, doc): Notes ----- - .. versionadded:: 1.3.0 - Logarithm is a multivalued function: for each `x` there is an infinite number of `z` such that `2**z = x`. The convention is to return the `z` whose imaginary part lies in `(-pi, pi]`. @@ -2229,10 +2209,6 @@ def add_newdoc(place, name, doc): -------- logaddexp2: Logarithm of the sum of exponentiations of inputs in base 2. 
- Notes - ----- - .. versionadded:: 1.3.0 - Examples -------- >>> import numpy as np @@ -2273,10 +2249,6 @@ def add_newdoc(place, name, doc): -------- logaddexp: Logarithm of the sum of exponentiations of the inputs. - Notes - ----- - .. versionadded:: 1.3.0 - Examples -------- >>> import numpy as np @@ -2674,8 +2646,6 @@ def add_newdoc(place, name, doc): Notes ----- - .. versionadded:: 1.3.0 - The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither x1 nor x2 are NaNs, but it is faster and does proper broadcasting. @@ -2683,7 +2653,7 @@ def add_newdoc(place, name, doc): -------- >>> import numpy as np >>> np.fmax([2, 3, 4], [1, 5, 2]) - array([ 2., 5., 4.]) + array([ 2, 5, 4]) >>> np.fmax(np.eye(2), [0.5, 2]) array([[ 1. , 2. ], @@ -2733,8 +2703,6 @@ def add_newdoc(place, name, doc): Notes ----- - .. versionadded:: 1.3.0 - The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither x1 nor x2 are NaNs, but it is faster and does proper broadcasting. @@ -2809,9 +2777,6 @@ def add_newdoc(place, name, doc): For other keyword-only arguments, see the :ref:`ufunc docs `. - .. versionadded:: 1.16 - Now handles ufunc kwargs - Returns ------- y : ndarray @@ -2828,14 +2793,15 @@ def add_newdoc(place, name, doc): See Also -------- - vdot : Complex-conjugating dot product. + vecdot : Complex-conjugating dot product for stacks of vectors. + matvec : Matrix-vector product for stacks of matrices and vectors. + vecmat : Vector-matrix product for stacks of vectors and matrices. tensordot : Sum products over arbitrary axes. einsum : Einstein summation convention. dot : alternative matrix product with different broadcasting rules. Notes ----- - The behavior depends on the arguments in the following way. - If both arguments are 2-D they are multiplied like conventional @@ -2844,10 +2810,10 @@ def add_newdoc(place, name, doc): matrices residing in the last two indexes and broadcast accordingly. - If the first argument is 1-D, it is promoted to a matrix by prepending a 1 to its dimensions. After matrix multiplication - the prepended 1 is removed. + the prepended 1 is removed. (For stacks of vectors, use ``vecmat``.) - If the second argument is 1-D, it is promoted to a matrix by appending a 1 to its dimensions. After matrix multiplication - the appended 1 is removed. + the appended 1 is removed. (For stacks of vectors, use ``matvec``.) ``matmul`` differs from ``dot`` in two important ways: @@ -2924,8 +2890,6 @@ def add_newdoc(place, name, doc): >>> x2 = np.array([2j, 3j]) >>> x1 @ x2 (-13+0j) - - .. versionadded:: 1.10.0 """) add_newdoc('numpy._core.umath', 'vecdot', @@ -2942,14 +2906,16 @@ def add_newdoc(place, name, doc): where :math:`\\overline{a_i}` denotes the complex conjugate if :math:`a_i` is complex and the identity otherwise. + .. versionadded:: 2.0.0 + Parameters ---------- x1, x2 : array_like Input arrays, scalars not allowed. out : ndarray, optional A location into which the result is stored. If provided, it must have - a shape that the broadcasted shape of `x1` and `x2` with the last axis - removed. If not provided or None, a freshly-allocated array is used. + the broadcasted shape of `x1` and `x2` with the last axis removed. + If not provided or None, a freshly-allocated array is used. **kwargs For other keyword-only arguments, see the :ref:`ufunc docs `. @@ -2971,6 +2937,9 @@ def add_newdoc(place, name, doc): See Also -------- vdot : same but flattens arguments first + matmul : Matrix-matrix product. + vecmat : Vector-matrix product. + matvec : Matrix-vector product. 
einsum : Einstein summation convention.

     Examples
     --------
     >>> import numpy as np
     >>> v = np.array([[0., 5., 0.], [0., 0., 10.], [0., 6., 8.]])
     >>> n = np.array([0., 0.6, 0.8])
     >>> np.vecdot(v, n)
     array([ 3.,  8., 10.])

-    .. versionadded:: 2.0.0
     """)

+add_newdoc('numpy._core.umath', 'matvec',
+    """
+    Matrix-vector dot product of two arrays.
+
+    Given a matrix (or stack of matrices) :math:`\\mathbf{A}` in ``x1`` and
+    a vector (or stack of vectors) :math:`\\mathbf{v}` in ``x2``, the
+    matrix-vector product is defined as:
+
+    .. math::
+       \\mathbf{A} \\cdot \\mathbf{v} = \\sum_{j=0}^{n-1} A_{ij} v_j
+
+    where the sum is over the last dimensions in ``x1`` and ``x2``
+    (unless ``axes`` is specified).  (For a matrix-vector product with the
+    vector conjugated, use ``np.vecmat(x2, x1.mT)``.)
+
+    .. versionadded:: 2.2.0
+
+    Parameters
+    ----------
+    x1, x2 : array_like
+        Input arrays, scalars not allowed.
+    out : ndarray, optional
+        A location into which the result is stored. If provided, it must have
+        the broadcasted shape of ``x1`` and ``x2`` with the summation axis
+        removed. If not provided or None, a freshly-allocated array is used.
+    **kwargs
+        For other keyword-only arguments, see the
+        :ref:`ufunc docs <ufuncs.kwargs>`.
+
+    Returns
+    -------
+    y : ndarray
+        The matrix-vector product of the inputs.
+
+    Raises
+    ------
+    ValueError
+        If the last dimensions of ``x1`` and ``x2`` are not the same size.
+
+        If a scalar value is passed in.
+
+    See Also
+    --------
+    vecdot : Vector-vector product.
+    vecmat : Vector-matrix product.
+    matmul : Matrix-matrix product.
+    einsum : Einstein summation convention.
+
+    Examples
+    --------
+    Rotate a set of vectors from Y to X along Z.
+
+    >>> a = np.array([[0., 1., 0.],
+    ...               [-1., 0., 0.],
+    ...               [0., 0., 1.]])
+    >>> v = np.array([[1., 0., 0.],
+    ...               [0., 1., 0.],
+    ...               [0., 0., 1.],
+    ...               [0., 6., 8.]])
+    >>> np.matvec(a, v)
+    array([[ 0., -1.,  0.],
+           [ 1.,  0.,  0.],
+           [ 0.,  0.,  1.],
+           [ 6.,  0.,  8.]])
+
+    """)
+
+add_newdoc('numpy._core.umath', 'vecmat',
+    """
+    Vector-matrix dot product of two arrays.
+
+    Given a vector (or stack of vectors) :math:`\\mathbf{v}` in ``x1`` and
+    a matrix (or stack of matrices) :math:`\\mathbf{A}` in ``x2``, the
+    vector-matrix product is defined as:
+
+    .. math::
+       \\mathbf{v} \\cdot \\mathbf{A} = \\sum_{i=0}^{n-1} \\overline{v_i}A_{ij}
+
+    where the sum is over the last dimension of ``x1`` and the one-but-last
+    dimension in ``x2`` (unless `axes` is specified) and where
+    :math:`\\overline{v_i}` denotes the complex conjugate if :math:`v`
+    is complex and the identity otherwise. (For a non-conjugated vector-matrix
+    product, use ``np.matvec(x2.mT, x1)``.)
+
+    .. versionadded:: 2.2.0
+
+    Parameters
+    ----------
+    x1, x2 : array_like
+        Input arrays, scalars not allowed.
+    out : ndarray, optional
+        A location into which the result is stored. If provided, it must have
+        the broadcasted shape of ``x1`` and ``x2`` with the summation axis
+        removed. If not provided or None, a freshly-allocated array is used.
+    **kwargs
+        For other keyword-only arguments, see the
+        :ref:`ufunc docs <ufuncs.kwargs>`.
+
+    Returns
+    -------
+    y : ndarray
+        The vector-matrix product of the inputs.
+
+    Raises
+    ------
+    ValueError
+        If the last dimension of ``x1`` and the one-but-last dimension of
+        ``x2`` are not the same size.
+
+        If a scalar value is passed in.
+
+    See Also
+    --------
+    vecdot : Vector-vector product.
+    matvec : Matrix-vector product.
+    matmul : Matrix-matrix product.
+    einsum : Einstein summation convention.
+
+    Examples
+    --------
+    Project a vector along X and Y.
+ + >>> v = np.array([0., 4., 2.]) + >>> a = np.array([[1., 0., 0.], + ... [0., 1., 0.], + ... [0., 0., 0.]]) + >>> np.vecmat(v, a) + array([ 0., 4., 0.]) + """) add_newdoc('numpy._core.umath', 'modf', @@ -3109,8 +3208,6 @@ def add_newdoc(place, name, doc): """ Numerical positive, element-wise. - .. versionadded:: 1.13.0 - Parameters ---------- x : array_like or scalar @@ -3299,8 +3396,6 @@ def add_newdoc(place, name, doc): To get complex results, cast the input to complex, or specify the ``dtype`` to be ``complex`` (see the example below). - .. versionadded:: 1.12.0 - Parameters ---------- x1 : array_like @@ -3427,8 +3522,6 @@ def add_newdoc(place, name, doc): Notes ----- - .. versionadded:: 1.3.0 - ``deg2rad(x)`` is ``x * pi / 180``. Examples @@ -3544,8 +3637,6 @@ def add_newdoc(place, name, doc): """ Return element-wise quotient and remainder simultaneously. - .. versionadded:: 1.13.0 - ``np.divmod(x, y)`` is equivalent to ``(x // y, x % y)``, but faster because it avoids redundant work. It is used to implement the Python built-in function ``divmod`` on NumPy arrays. @@ -3707,7 +3798,7 @@ def add_newdoc(place, name, doc): There is more than one definition of sign in common use for complex numbers. The definition used here, :math:`x/|x|`, is the more common and useful one, but is different from the one used in numpy prior to - version 2.0, :math:`x/\\sqrt{x*x}`, which is equivalent to + version 2.0, :math:`x/\\sqrt{x*x}`, which is equivalent to ``sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j``. Examples @@ -4015,8 +4106,6 @@ def add_newdoc(place, name, doc): """ Return the cube-root of an array, element-wise. - .. versionadded:: 1.10.0 - Parameters ---------- x : array_like @@ -4263,7 +4352,7 @@ def add_newdoc(place, name, doc): array([ 0. , 0.5 , 0.5 , 0.75 , 0.5 , 0.625, 0.75 , 0.875, 0.5 ]) >>> y2 - array([0, 1, 2, 2, 3, 3, 3, 3, 4]) + array([0, 1, 2, 2, 3, 3, 3, 3, 4], dtype=int32) >>> y1 * 2**y2 array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.]) @@ -4411,7 +4500,7 @@ def add_newdoc(place, name, doc): -------- >>> import numpy as np >>> np.bitwise_count(1023) - 10 + np.uint8(10) >>> a = np.array([2**i - 1 for i in range(16)]) >>> np.bitwise_count(a) array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], @@ -4560,6 +4649,15 @@ def add_newdoc(place, name, doc): -------- str.isspace + Examples + -------- + >>> np.char.isspace(list("a b c")) + array([False, True, False, True, False]) + >>> np.char.isspace(b'\x0a \x0b \x0c') + np.True_ + >>> np.char.isspace(b'\x0a \x0b \x0c N') + np.False_ + """) add_newdoc('numpy._core.umath', 'isalnum', @@ -4650,7 +4748,7 @@ def add_newdoc(place, name, doc): array(True) >>> a = np.array(["hello", "HELLO", "Hello"]) >>> np.strings.isupper(a) - array([False, True, False]) + array([False, True, False]) """) diff --git a/numpy/_core/defchararray.py b/numpy/_core/defchararray.py index 6301556aaaa9..49ed5d38525e 100644 --- a/numpy/_core/defchararray.py +++ b/numpy/_core/defchararray.py @@ -216,7 +216,7 @@ def greater(x1, x2): See Also -------- equal, not_equal, greater_equal, less_equal, less - + Examples -------- >>> import numpy as np @@ -262,6 +262,7 @@ def less(x1, x2): return compare_chararrays(x1, x2, '<', True) +@set_module("numpy.char") def multiply(a, i): """ Return (a * i), that is string multiple concatenation, @@ -313,6 +314,7 @@ def multiply(a, i): raise ValueError("Can only multiply by integers") +@set_module("numpy.char") def partition(a, sep): """ Partition each element in `a` around `sep`. 
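A numeric cross-check, not part of the diff, of the matvec/vecmat definitions documented above (it assumes a NumPy >= 2.2 build that ships the new gufuncs); note that vecmat conjugates its vector argument while matvec does not:

import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(3, 4)) + 1j * rng.normal(size=(3, 4))
v = rng.normal(size=4) + 1j * rng.normal(size=4)   # contracts with A's last axis
w = rng.normal(size=3) + 1j * rng.normal(size=3)   # contracts with A's first axis

# matvec: sum_j A_ij v_j, no conjugation
np.testing.assert_allclose(np.matvec(A, v), np.einsum("ij,j->i", A, v))
# vecmat: sum_i conj(w_i) A_ij, the vector is conjugated
np.testing.assert_allclose(np.vecmat(w, A), np.einsum("i,ij->j", w.conj(), A))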
@@ -354,6 +356,7 @@ def partition(a, sep): return np.stack(strings_partition(a, sep), axis=-1) +@set_module("numpy.char") def rpartition(a, sep): """ Partition (split) each element around the right-most separator. @@ -1269,6 +1272,15 @@ class adds the following functionality: fastest). If order is 'A', then the returned array may be in any order (either C-, Fortran-contiguous, or even discontiguous). + + Examples + -------- + + >>> import numpy as np + >>> char_array = np.char.array(['hello', 'world', 'numpy','array']) + >>> char_array + chararray(['hello', 'world', 'numpy', 'array'], dtype=' chararray[Any, dtype[bytes_]]: ... + ) -> chararray[_Shape, dtype[bytes_]]: ... @overload def __new__( subtype, @@ -57,12 +120,12 @@ class chararray(ndarray[_ShapeType_co, _CharDType]): offset: SupportsIndex = ..., strides: _ShapeLike = ..., order: _OrderKACF = ..., - ) -> chararray[Any, dtype[str_]]: ... + ) -> chararray[_Shape, dtype[str_]]: ... def __array_finalize__(self, obj: object) -> None: ... - def __mul__(self, other: i_co) -> chararray[Any, _CharDType]: ... - def __rmul__(self, other: i_co) -> chararray[Any, _CharDType]: ... - def __mod__(self, i: Any) -> chararray[Any, _CharDType]: ... + def __mul__(self, other: i_co) -> chararray[_Shape, _CharDType_co]: ... + def __rmul__(self, other: i_co) -> chararray[_Shape, _CharDType_co]: ... + def __mod__(self, i: Any) -> chararray[_Shape, _CharDType_co]: ... @overload def __eq__( @@ -210,7 +273,7 @@ class chararray(ndarray[_ShapeType_co, _CharDType]): def expandtabs( self, tabsize: i_co = ..., - ) -> chararray[Any, _CharDType]: ... + ) -> chararray[_Shape, _CharDType_co]: ... @overload def find( @@ -435,89 +498,119 @@ class chararray(ndarray[_ShapeType_co, _CharDType]): deletechars: None | S_co = ..., ) -> _CharArray[bytes_]: ... - def zfill(self, width: _ArrayLikeInt_co) -> chararray[Any, _CharDType]: ... - def capitalize(self) -> chararray[_ShapeType_co, _CharDType]: ... - def title(self) -> chararray[_ShapeType_co, _CharDType]: ... - def swapcase(self) -> chararray[_ShapeType_co, _CharDType]: ... - def lower(self) -> chararray[_ShapeType_co, _CharDType]: ... - def upper(self) -> chararray[_ShapeType_co, _CharDType]: ... - def isalnum(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... - def isalpha(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... - def isdigit(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... - def islower(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... - def isspace(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... - def istitle(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... - def isupper(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... - def isnumeric(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... - def isdecimal(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... - -__all__: list[str] + def zfill(self, width: i_co) -> chararray[_Shape, _CharDType_co]: ... + def capitalize(self) -> chararray[_ShapeT_co, _CharDType_co]: ... + def title(self) -> chararray[_ShapeT_co, _CharDType_co]: ... + def swapcase(self) -> chararray[_ShapeT_co, _CharDType_co]: ... + def lower(self) -> chararray[_ShapeT_co, _CharDType_co]: ... + def upper(self) -> chararray[_ShapeT_co, _CharDType_co]: ... + def isalnum(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isalpha(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isdigit(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def islower(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isspace(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... 
+    def istitle(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
+    def isupper(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
+    def isnumeric(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
+    def isdecimal(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
+
 # Comparison
 @overload
 def equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
 @overload
 def equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
+@overload
+def equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...

 @overload
 def not_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
 @overload
 def not_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
+@overload
+def not_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...

 @overload
 def greater_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
 @overload
 def greater_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
+@overload
+def greater_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...

 @overload
 def less_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
 @overload
 def less_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
+@overload
+def less_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...

 @overload
 def greater(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
 @overload
 def greater(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
+@overload
+def greater(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...

 @overload
 def less(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
 @overload
 def less(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
+@overload
+def less(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...

-# String operations
 @overload
-def add(x1: U_co, x2: U_co) -> NDArray[str_]: ...
+def add(x1: U_co, x2: U_co) -> NDArray[np.str_]: ...
+@overload
+def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ...
+@overload
+def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
 @overload
-def add(x1: S_co, x2: S_co) -> NDArray[bytes_]: ...
+def add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ...

 @overload
-def multiply(a: U_co, i: i_co) -> NDArray[str_]: ...
+def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ...
 @overload
-def multiply(a: S_co, i: i_co) -> NDArray[bytes_]: ...
+def multiply(a: S_co, i: i_co) -> NDArray[np.bytes_]: ...
+@overload
+def multiply(a: _StringDTypeSupportsArray, i: i_co) -> _StringDTypeArray: ...
+@overload
+def multiply(a: T_co, i: i_co) -> _StringDTypeOrUnicodeArray: ...
+
+@overload
+def mod(a: U_co, value: Any) -> NDArray[np.str_]: ...
 @overload
-def mod(a: U_co, value: Any) -> NDArray[str_]: ...
+def mod(a: S_co, value: Any) -> NDArray[np.bytes_]: ...
 @overload
-def mod(a: S_co, value: Any) -> NDArray[bytes_]: ...
+def mod(a: _StringDTypeSupportsArray, value: Any) -> _StringDTypeArray: ...
+@overload
+def mod(a: T_co, value: Any) -> _StringDTypeOrUnicodeArray: ...

 @overload
 def capitalize(a: U_co) -> NDArray[str_]: ...
 @overload
 def capitalize(a: S_co) -> NDArray[bytes_]: ...
+@overload
+def capitalize(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
+@overload
+def capitalize(a: T_co) -> _StringDTypeOrUnicodeArray: ...

 @overload
 def center(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ...
 @overload
 def center(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ...
+@overload
+def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ...
+@overload
+def center(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ...

 def decode(
     a: S_co,
     encoding: None | str = ...,
     errors: None | str = ...,
 ) -> NDArray[str_]: ...
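The `T_co` overloads added above cover arrays using the variable-width StringDType. A hedged sketch, not part of the diff, assuming a NumPy >= 2.0 runtime where `np.char` accepts such arrays (as the stubs above declare):

import numpy as np

s = np.array(["spam", "ham"], dtype=np.dtypes.StringDType())
print(np.char.multiply(s, 2))   # ['spamspam' 'hamham'], result keeps StringDType
print(np.char.capitalize(s))    # ['Spam' 'Ham']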
- def encode( - a: U_co, + a: U_co | T_co, encoding: None | str = ..., errors: None | str = ..., ) -> NDArray[bytes_]: ... @@ -526,31 +619,55 @@ def encode( def expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[str_]: ... @overload def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[bytes_]: ... +@overload +def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = ...) -> _StringDTypeArray: ... +@overload +def expandtabs(a: T_co, tabsize: i_co = ...) -> _StringDTypeOrUnicodeArray: ... @overload def join(sep: U_co, seq: U_co) -> NDArray[str_]: ... @overload def join(sep: S_co, seq: S_co) -> NDArray[bytes_]: ... +@overload +def join(sep: _StringDTypeSupportsArray, seq: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def join(sep: T_co, seq: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def ljust(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ... @overload def ljust(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ... +@overload +def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +@overload +def ljust(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ... @overload def lower(a: U_co) -> NDArray[str_]: ... @overload def lower(a: S_co) -> NDArray[bytes_]: ... +@overload +def lower(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def lower(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def lstrip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ... @overload def lstrip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ... +@overload +def lstrip(a: _StringDTypeSupportsArray, chars: None | _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +@overload +def lstrip(a: T_co, chars: None | T_co = ...) -> _StringDTypeOrUnicodeArray: ... @overload def partition(a: U_co, sep: U_co) -> NDArray[str_]: ... @overload def partition(a: S_co, sep: S_co) -> NDArray[bytes_]: ... +@overload +def partition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def partition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def replace( @@ -566,6 +683,20 @@ def replace( new: S_co, count: None | i_co = ..., ) -> NDArray[bytes_]: ... +@overload +def replace( + a: _StringDTypeSupportsArray, + old: _StringDTypeSupportsArray, + new: _StringDTypeSupportsArray, + count: i_co = ..., +) -> _StringDTypeArray: ... +@overload +def replace( + a: T_co, + old: T_co, + new: T_co, + count: i_co = ..., +) -> _StringDTypeOrUnicodeArray: ... @overload def rjust( @@ -579,11 +710,27 @@ def rjust( width: i_co, fillchar: S_co = ..., ) -> NDArray[bytes_]: ... +@overload +def rjust( + a: _StringDTypeSupportsArray, + width: i_co, + fillchar: _StringDTypeSupportsArray = ..., +) -> _StringDTypeArray: ... +@overload +def rjust( + a: T_co, + width: i_co, + fillchar: T_co = ..., +) -> _StringDTypeOrUnicodeArray: ... @overload def rpartition(a: U_co, sep: U_co) -> NDArray[str_]: ... @overload def rpartition(a: S_co, sep: S_co) -> NDArray[bytes_]: ... +@overload +def rpartition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def rpartition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def rsplit( @@ -597,11 +744,27 @@ def rsplit( sep: None | S_co = ..., maxsplit: None | i_co = ..., ) -> NDArray[object_]: ... 
+@overload +def rsplit( + a: _StringDTypeSupportsArray, + sep: None | _StringDTypeSupportsArray = ..., + maxsplit: None | i_co = ..., +) -> NDArray[object_]: ... +@overload +def rsplit( + a: T_co, + sep: None | T_co = ..., + maxsplit: None | i_co = ..., +) -> NDArray[object_]: ... @overload def rstrip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ... @overload def rstrip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ... +@overload +def rstrip(a: _StringDTypeSupportsArray, chars: None | _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +@overload +def rstrip(a: T_co, chars: None | T_co = ...) -> _StringDTypeOrUnicodeArray: ... @overload def split( @@ -615,49 +778,90 @@ def split( sep: None | S_co = ..., maxsplit: None | i_co = ..., ) -> NDArray[object_]: ... - @overload -def splitlines(a: U_co, keepends: None | b_co = ...) -> NDArray[object_]: ... +def split( + a: _StringDTypeSupportsArray, + sep: None | _StringDTypeSupportsArray = ..., + maxsplit: None | i_co = ..., +) -> NDArray[object_]: ... @overload -def splitlines(a: S_co, keepends: None | b_co = ...) -> NDArray[object_]: ... +def split( + a: T_co, + sep: None | T_co = ..., + maxsplit: None | i_co = ..., +) -> NDArray[object_]: ... + +def splitlines(a: UST_co, keepends: None | b_co = ...) -> NDArray[np.object_]: ... @overload def strip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ... @overload def strip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ... +@overload +def strip(a: _StringDTypeSupportsArray, chars: None | _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +@overload +def strip(a: T_co, chars: None | T_co = ...) -> _StringDTypeOrUnicodeArray: ... @overload def swapcase(a: U_co) -> NDArray[str_]: ... @overload def swapcase(a: S_co) -> NDArray[bytes_]: ... +@overload +def swapcase(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def swapcase(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def title(a: U_co) -> NDArray[str_]: ... @overload def title(a: S_co) -> NDArray[bytes_]: ... +@overload +def title(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def title(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def translate( a: U_co, - table: U_co, - deletechars: None | U_co = ..., + table: str, + deletechars: None | str = ..., ) -> NDArray[str_]: ... @overload def translate( a: S_co, - table: S_co, - deletechars: None | S_co = ..., + table: str, + deletechars: None | str = ..., ) -> NDArray[bytes_]: ... +@overload +def translate( + a: _StringDTypeSupportsArray, + table: str, + deletechars: None | str = ..., +) -> _StringDTypeArray: ... +@overload +def translate( + a: T_co, + table: str, + deletechars: None | str = ..., +) -> _StringDTypeOrUnicodeArray: ... @overload def upper(a: U_co) -> NDArray[str_]: ... @overload def upper(a: S_co) -> NDArray[bytes_]: ... +@overload +def upper(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def upper(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def zfill(a: U_co, width: i_co) -> NDArray[str_]: ... @overload def zfill(a: S_co, width: i_co) -> NDArray[bytes_]: ... +@overload +def zfill(a: _StringDTypeSupportsArray, width: i_co) -> _StringDTypeArray: ... +@overload +def zfill(a: T_co, width: i_co) -> _StringDTypeOrUnicodeArray: ... # String information @overload @@ -674,6 +878,13 @@ def count( start: i_co = ..., end: None | i_co = ..., ) -> NDArray[int_]: ... 
+@overload
+def count(
+    a: T_co,
+    sub: T_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[np.int_]: ...
 
 @overload
 def endswith(
@@ -689,6 +900,13 @@ def endswith(
     start: i_co = ...,
     end: None | i_co = ...,
 ) -> NDArray[np.bool]: ...
+@overload
+def endswith(
+    a: T_co,
+    suffix: T_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[np.bool]: ...
 
 @overload
 def find(
@@ -704,6 +922,13 @@ def find(
     start: i_co = ...,
     end: None | i_co = ...,
 ) -> NDArray[int_]: ...
+@overload
+def find(
+    a: T_co,
+    sub: T_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[np.int_]: ...
 
 @overload
 def index(
@@ -719,16 +944,23 @@ def index(
     start: i_co = ...,
     end: None | i_co = ...,
 ) -> NDArray[int_]: ...
+@overload
+def index(
+    a: T_co,
+    sub: T_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[np.int_]: ...
 
-def isalpha(a: U_co | S_co) -> NDArray[np.bool]: ...
-def isalnum(a: U_co | S_co) -> NDArray[np.bool]: ...
-def isdecimal(a: U_co) -> NDArray[np.bool]: ...
-def isdigit(a: U_co | S_co) -> NDArray[np.bool]: ...
-def islower(a: U_co | S_co) -> NDArray[np.bool]: ...
-def isnumeric(a: U_co) -> NDArray[np.bool]: ...
-def isspace(a: U_co | S_co) -> NDArray[np.bool]: ...
-def istitle(a: U_co | S_co) -> NDArray[np.bool]: ...
-def isupper(a: U_co | S_co) -> NDArray[np.bool]: ...
+def isalpha(a: UST_co) -> NDArray[np.bool]: ...
+def isalnum(a: UST_co) -> NDArray[np.bool]: ...
+def isdecimal(a: U_co | T_co) -> NDArray[np.bool]: ...
+def isdigit(a: UST_co) -> NDArray[np.bool]: ...
+def islower(a: UST_co) -> NDArray[np.bool]: ...
+def isnumeric(a: U_co | T_co) -> NDArray[np.bool]: ...
+def isspace(a: UST_co) -> NDArray[np.bool]: ...
+def istitle(a: UST_co) -> NDArray[np.bool]: ...
+def isupper(a: UST_co) -> NDArray[np.bool]: ...
 
 @overload
 def rfind(
@@ -744,6 +976,13 @@ def rfind(
     start: i_co = ...,
     end: None | i_co = ...,
 ) -> NDArray[int_]: ...
+@overload
+def rfind(
+    a: T_co,
+    sub: T_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[np.int_]: ...
 
 @overload
 def rindex(
@@ -759,6 +998,13 @@ def rindex(
     start: i_co = ...,
     end: None | i_co = ...,
 ) -> NDArray[int_]: ...
+@overload
+def rindex(
+    a: T_co,
+    sub: T_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[np.int_]: ...
 
 @overload
 def startswith(
@@ -774,8 +1020,15 @@ def startswith(
     start: i_co = ...,
     end: None | i_co = ...,
 ) -> NDArray[np.bool]: ...
+@overload
+def startswith(
+    a: T_co,
+    prefix: T_co,
+    start: i_co = ...,
+    end: i_co | None = ...,
+) -> NDArray[np.bool]: ...
 
-def str_len(A: U_co | S_co) -> NDArray[int_]: ...
+def str_len(A: UST_co) -> NDArray[int_]: ...
 
 # Overload 1 and 2: str- or bytes-based array-likes
 # overload 3: arbitrary object with unicode=False (-> bytes_)
diff --git a/numpy/_core/einsumfunc.py b/numpy/_core/einsumfunc.py
index 7aa5f22fe939..f74dd46e1782 100644
--- a/numpy/_core/einsumfunc.py
+++ b/numpy/_core/einsumfunc.py
@@ -219,7 +219,7 @@ def _optimal_path(input_sets, output_set, idx_dict, memory_limit):
     return path
 
 def _parse_possible_contraction(
-    positions, input_sets, output_set, idx_dict, 
+    positions, input_sets, output_set, idx_dict,
    memory_limit, path_cost, naive_cost
 ):
     """Compute the cost (removed size + flops) and resultant indices for
@@ -290,7 +290,7 @@ def _update_other_results(results, best):
     Parameters
     ----------
     results : list
-        List of contraction results produced by 
+        List of contraction results produced by
        ``_parse_possible_contraction``.
     best : list
        The best contraction of ``results`` i.e.
the one that @@ -833,7 +833,7 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): >>> print(path_info[0]) ['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)] - >>> print(path_info[1]) + >>> print(path_info[1]) Complete contraction: ea,fb,abcd,gc,hd->efgh # may vary Naive scaling: 8 Optimized scaling: 5 @@ -964,7 +964,7 @@ def einsum_path(*operands, optimize='greedy', einsum_call=False): # Build contraction tuple (positions, gemm, einsum_str, remaining) for cnum, contract_inds in enumerate(path): # Make sure we remove inds from right to left - contract_inds = tuple(sorted(list(contract_inds), reverse=True)) + contract_inds = tuple(sorted(contract_inds, reverse=True)) contract = _find_contraction(contract_inds, input_sets, output_set) out_inds, input_sets, idx_removed, idx_contract = contract @@ -1132,8 +1132,6 @@ def einsum(*operands, out=None, optimize=False, **kwargs): Notes ----- - .. versionadded:: 1.6.0 - The Einstein summation convention can be used to compute many multi-dimensional, linear algebraic array operations. `einsum` provides a succinct way of representing these. @@ -1183,7 +1181,7 @@ def einsum(*operands, out=None, optimize=False, **kwargs): ``np.einsum('i->', a)`` is like :py:func:`np.sum(a) ` if ``a`` is a 1-D array, and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) ` if ``a`` is a square 2-D array. - The difference is that `einsum` does not allow broadcasting by default. + The difference is that `einsum` does not allow broadcasting by default. Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the order of the output subscript labels and therefore returns matrix multiplication, unlike the example above in implicit mode. @@ -1191,7 +1189,7 @@ def einsum(*operands, out=None, optimize=False, **kwargs): To enable and control broadcasting, use an ellipsis. Default NumPy-style broadcasting is done by adding an ellipsis to the left of each term, like ``np.einsum('...ii->...i', a)``. - ``np.einsum('...i->...', a)`` is like + ``np.einsum('...i->...', a)`` is like :py:func:`np.sum(a, axis=-1) ` for array ``a`` of any shape. To take the trace along the first and last axes, you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix @@ -1210,16 +1208,12 @@ def einsum(*operands, out=None, optimize=False, **kwargs): The examples below have corresponding `einsum` calls with the two parameter methods. - .. versionadded:: 1.10.0 - Views returned from einsum are now writeable whenever the input array is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now have the same effect as :py:func:`np.swapaxes(a, 0, 2) ` and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal of a 2D array. - .. versionadded:: 1.12.0 - Added the ``optimize`` argument which will optimize the contraction order of an einsum expression. For a contraction with three or more operands this can greatly increase the computational efficiency at the cost of @@ -1413,7 +1407,7 @@ def einsum(*operands, out=None, optimize=False, **kwargs): Optimal `einsum` (best usage pattern in some use cases): ~110ms - >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, + >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, ... optimize='optimal')[0] >>> for iteration in range(500): ... 
_ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path) diff --git a/numpy/_core/einsumfunc.pyi b/numpy/_core/einsumfunc.pyi index 513f0635e35e..d7de9c02e16e 100644 --- a/numpy/_core/einsumfunc.pyi +++ b/numpy/_core/einsumfunc.pyi @@ -1,5 +1,5 @@ from collections.abc import Sequence -from typing import TypeVar, Any, overload, Literal +from typing import TypeAlias, TypeVar, Any, overload, Literal import numpy as np from numpy import number, _OrderKACF @@ -20,16 +20,17 @@ from numpy._typing import ( _DTypeLikeObject, ) +__all__ = ["einsum", "einsum_path"] + _ArrayType = TypeVar( "_ArrayType", bound=NDArray[np.bool | number[Any]], ) -_OptimizeKind = None | bool | Literal["greedy", "optimal"] | Sequence[Any] -_CastingSafe = Literal["no", "equiv", "safe", "same_kind"] -_CastingUnsafe = Literal["unsafe"] +_OptimizeKind: TypeAlias = bool | Literal["greedy", "optimal"] | Sequence[Any] | None +_CastingSafe: TypeAlias = Literal["no", "equiv", "safe", "same_kind"] +_CastingUnsafe: TypeAlias = Literal["unsafe"] -__all__: list[str] # TODO: Properly handle the `casting`-based combinatorics # TODO: We need to evaluate the content `__subscripts` in order diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 45614511ecf0..202bcde9e570 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -134,9 +134,6 @@ def take(a, indices, axis=None, out=None, mode='raise'): The source array. indices : array_like (Nj...) The indices of the values to extract. - - .. versionadded:: 1.8.0 - Also allow scalars for indices. axis : int, optional The axis over which to select values. By default, the flattened @@ -169,7 +166,6 @@ def take(a, indices, axis=None, out=None, mode='raise'): Notes ----- - By eliminating the inner loop in the description above, and using `s_` to build simple slice objects, `take` can be expressed in terms of applying fancy indexing to each 1-d slice:: @@ -207,13 +203,13 @@ def take(a, indices, axis=None, out=None, mode='raise'): return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode) -def _reshape_dispatcher(a, /, shape=None, *, newshape=None, order=None, +def _reshape_dispatcher(a, /, shape=None, order=None, *, newshape=None, copy=None): return (a,) @array_function_dispatch(_reshape_dispatcher) -def reshape(a, /, shape=None, *, newshape=None, order='C', copy=None): +def reshape(a, /, shape=None, order='C', *, newshape=None, copy=None): """ Gives a new shape to an array without changing its data. @@ -226,10 +222,6 @@ def reshape(a, /, shape=None, *, newshape=None, order='C', copy=None): an integer, then the result will be a 1-D array of that length. One shape dimension can be -1. In this case, the value is inferred from the length of the array and remaining dimensions. - newshape : int or tuple of ints - .. deprecated:: 2.1 - Replaced by ``shape`` argument. Retained for backward - compatibility. order : {'C', 'F', 'A'}, optional Read the elements of ``a`` using this index order, and place the elements into the reshaped array using this index order. 'C' @@ -243,6 +235,10 @@ def reshape(a, /, shape=None, *, newshape=None, order='C', copy=None): 'A' means to read / write the elements in Fortran-like index order if ``a`` is Fortran *contiguous* in memory, C-like order otherwise. + newshape : int or tuple of ints + .. deprecated:: 2.1 + Replaced by ``shape`` argument. Retained for backward + compatibility. copy : bool, optional If ``True``, then the array data is copied. If ``None``, a copy will only be made if it's required by ``order``. 
For ``False`` it raises @@ -341,10 +337,9 @@ def choose(a, choices, out=None, mode='raise'): First of all, if confused or uncertain, definitely look at the Examples - in its full generality, this function is less simple than it might - seem from the following code description (below ndi = - `numpy.lib.index_tricks`): + seem from the following code description:: - ``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``. + np.choose(a,c) == np.array([c[a[I]][I] for I in np.ndindex(a.shape)]) But this omits some subtleties. Here is a fully general summary: @@ -653,10 +648,11 @@ def transpose(a, axes=None): Input array. axes : tuple or list of ints, optional If specified, it must be a tuple or list which contains a permutation - of [0,1,...,N-1] where N is the number of axes of `a`. The `i`'th axis - of the returned array will correspond to the axis numbered ``axes[i]`` - of the input. If not specified, defaults to ``range(a.ndim)[::-1]``, - which reverses the order of the axes. + of [0, 1, ..., N-1] where N is the number of axes of `a`. Negative + indices can also be used to specify axes. The i-th axis of the returned + array will correspond to the axis numbered ``axes[i]`` of the input. + If not specified, defaults to ``range(a.ndim)[::-1]``, which reverses + the order of the axes. Returns ------- @@ -699,6 +695,10 @@ def transpose(a, axes=None): >>> np.transpose(a).shape (5, 4, 3, 2) + >>> a = np.arange(3*4*5).reshape((3, 4, 5)) + >>> np.transpose(a, (-1, 0, -2)).shape + (5, 3, 4) + """ return _wrapfunc(a, 'transpose', axes) @@ -768,8 +768,6 @@ def partition(a, kth, axis=-1, kind='introselect', order=None): partitions on the either side of the k-th element in the output array is undefined. - .. versionadded:: 1.8.0 - Parameters ---------- a : array_like @@ -883,8 +881,6 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None): indices of the same shape as `a` that index data along the given axis in partitioned order. - .. versionadded:: 1.8.0 - Parameters ---------- a : array_like @@ -987,10 +983,6 @@ def sort(a, axis=-1, kind=None, order=None, *, stable=None): and 'mergesort' use timsort or radix sort under the covers and, in general, the actual implementation will vary with data type. The 'mergesort' option is retained for backwards compatibility. - - .. versionchanged:: 1.15.0. - The 'stable' option was added. - order : str or list of str, optional When `a` is an array with fields defined, this argument specifies which fields to compare first, second, etc. A single field can @@ -1061,8 +1053,6 @@ def sort(a, axis=-1, kind=None, order=None, *, stable=None): placements are sorted according to the non-nan part if it exists. Non-nan values are sorted as before. - .. versionadded:: 1.12.0 - quicksort has been changed to: `introsort `_. When sorting does not make enough progress it switches to @@ -1079,8 +1069,6 @@ def sort(a, axis=-1, kind=None, order=None, *, stable=None): ability to select the implementation and it is hardwired for the different data types. - .. versionadded:: 1.17.0 - Timsort is added for better performance on already or nearly sorted data. On random data timsort is almost identical to mergesort. It is now used for stable sort while quicksort is still the @@ -1090,8 +1078,6 @@ def sort(a, axis=-1, kind=None, order=None, *, stable=None): 'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an O(n) sort instead of O(n log n). - .. 
versionchanged:: 1.18.0 - NaT now sorts to the end of arrays for consistency with NaN. Examples @@ -1162,9 +1148,6 @@ def argsort(a, axis=-1, kind=None, order=None, *, stable=None): and 'mergesort' use timsort under the covers and, in general, the actual implementation will vary with data type. The 'mergesort' option is retained for backwards compatibility. - - .. versionchanged:: 1.15.0. - The 'stable' option was added. order : str or list of str, optional When `a` is an array with fields defined, this argument specifies which fields to compare first, second, etc. A single field can @@ -1495,8 +1478,6 @@ def searchsorted(a, v, side='left', sorter=None): Optional array of integer indices that sort array a into ascending order. They are typically the result of argsort. - .. versionadded:: 1.7.0 - Returns ------- indices : int or array of ints @@ -1530,6 +1511,18 @@ def searchsorted(a, v, side='left', sorter=None): >>> np.searchsorted([11,12,13,14,15], [-10, 20, 12, 13]) array([0, 5, 1, 2]) + When `sorter` is used, the returned indices refer to the sorted + array of `a` and not `a` itself: + + >>> a = np.array([40, 10, 20, 30]) + >>> sorter = np.argsort(a) + >>> sorter + array([1, 2, 3, 0]) # Indices that would sort the array 'a' + >>> result = np.searchsorted(a, 25, sorter=sorter) + >>> result + 2 + >>> a[sorter[result]] + 30 # The element at index 2 of the sorted array is 30. """ return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter) @@ -1635,8 +1628,6 @@ def squeeze(a, axis=None): a : array_like Input data. axis : None or int or tuple of ints, optional - .. versionadded:: 1.7.0 - Selects a subset of the entries of length one in the shape. If an axis is selected with shape entry greater than one, an error is raised. @@ -2285,8 +2276,6 @@ def clip(a, a_min=np._NoValue, a_max=np._NoValue, out=None, *, For other keyword-only arguments, see the :ref:`ufunc docs `. - .. versionadded:: 1.17.0 - Returns ------- clipped_array : ndarray @@ -2359,11 +2348,8 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, axis : None or int or tuple of ints, optional Axis or axes along which a sum is performed. The default, axis=None, will sum all of the elements of the input array. If - axis is negative it counts from the last to the first axis. - - .. versionadded:: 1.7.0 - - If axis is a tuple of ints, a sum is performed on all of the axes + axis is negative it counts from the last to the first axis. If + axis is a tuple of ints, a sum is performed on all of the axes specified in the tuple instead of a single axis or all the axes as before. dtype : dtype, optional @@ -2389,14 +2375,9 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, exceptions will be raised. initial : scalar, optional Starting value for the sum. See `~numpy.ufunc.reduce` for details. - - .. versionadded:: 1.15.0 - where : array_like of bool, optional Elements to include in the sum. See `~numpy.ufunc.reduce` for details. - .. 
versionadded:: 1.17.0 - Returns ------- sum_along_axis : ndarray @@ -2447,7 +2428,7 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, >>> np.sum([0.5, 1.5]) 2.0 >>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32) - 1 + np.int32(1) >>> np.sum([[0, 1], [0, 5]]) 6 >>> np.sum([[0, 1], [0, 5]], axis=0) @@ -2460,7 +2441,7 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, If the accumulator is too small, overflow occurs: >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8) - -128 + np.int8(-128) You can also start the sum with a value other than zero: @@ -2508,11 +2489,8 @@ def any(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue): Axis or axes along which a logical OR reduction is performed. The default (``axis=None``) is to perform a logical OR over all the dimensions of the input array. `axis` may be negative, in - which case it counts from the last to the first axis. - - .. versionadded:: 1.7.0 - - If this is a tuple of ints, a reduction is performed on multiple + which case it counts from the last to the first axis. If this + is a tuple of ints, a reduction is performed on multiple axes, instead of a single axis or all the axes as before. out : ndarray, optional Alternate output array in which to place the result. It must have @@ -2621,11 +2599,8 @@ def all(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue): Axis or axes along which a logical AND reduction is performed. The default (``axis=None``) is to perform a logical AND over all the dimensions of the input array. `axis` may be negative, in - which case it counts from the last to the first axis. - - .. versionadded:: 1.7.0 - - If this is a tuple of ints, a reduction is performed on multiple + which case it counts from the last to the first axis. If this + is a tuple of ints, a reduction is performed on multiple axes, instead of a single axis or all the axes as before. out : ndarray, optional Alternate output array in which to place the result. @@ -2840,7 +2815,7 @@ def cumulative_sum(x, /, *, axis=None, dtype=None, out=None, but the type will be cast if necessary. See :ref:`ufuncs-output-type` for more details. include_initial : bool, optional - Boolean indicating whether to include the initial value (ones) as + Boolean indicating whether to include the initial value (zeros) as the first value in the output. With ``include_initial=True`` the shape of the output is different than the shape of the input. Default: ``False``. @@ -3007,9 +2982,6 @@ def ptp(a, axis=None, out=None, keepdims=np._NoValue): Axis along which to find the peaks. By default, flatten the array. `axis` may be negative, in which case it counts from the last to the first axis. - - .. versionadded:: 1.15.0 - If this is a tuple of ints, a reduction is performed on multiple axes, instead of a single axis or all the axes as before. out : array_like @@ -3090,12 +3062,9 @@ def max(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, Input data. axis : None or int or tuple of ints, optional Axis or axes along which to operate. By default, flattened input is - used. - - .. versionadded:: 1.7.0 + used. If this is a tuple of ints, the maximum is selected over + multiple axes, instead of a single axis or all the axes as before. - If this is a tuple of ints, the maximum is selected over multiple axes, - instead of a single axis or all the axes as before. out : ndarray, optional Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. 
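Reviewer note: the `cumulative_sum` docstring fix above ("zeros", not "ones") is easy to confirm at the REPL; a minimal sketch, assuming NumPy ≥ 2.1 where `cumulative_sum` was introduced:

```python
import numpy as np

x = np.array([1, 2, 3])
print(np.cumulative_sum(x))                        # [1 3 6]
print(np.cumulative_sum(x, include_initial=True))  # [0 1 3 6] — leading zero, length n + 1
```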
@@ -3116,14 +3085,10 @@ def max(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, The minimum value of an output element. Must be present to allow computation on empty slice. See `~numpy.ufunc.reduce` for details. - .. versionadded:: 1.15.0 - where : array_like of bool, optional Elements to compare for the maximum. See `~numpy.ufunc.reduce` for details. - .. versionadded:: 1.17.0 - Returns ------- max : ndarray or scalar @@ -3236,8 +3201,6 @@ def min(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, Axis or axes along which to operate. By default, flattened input is used. - .. versionadded:: 1.7.0 - If this is a tuple of ints, the minimum is selected over multiple axes, instead of a single axis or all the axes as before. out : ndarray, optional @@ -3260,14 +3223,10 @@ def min(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, The maximum value of an output element. Must be present to allow computation on empty slice. See `~numpy.ufunc.reduce` for details. - .. versionadded:: 1.15.0 - where : array_like of bool, optional Elements to compare for the minimum. See `~numpy.ufunc.reduce` for details. - .. versionadded:: 1.17.0 - Returns ------- min : ndarray or scalar @@ -3382,8 +3341,6 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, input array. If axis is negative it counts from the last to the first axis. - .. versionadded:: 1.7.0 - If axis is a tuple of ints, a product is performed on all of the axes specified in the tuple instead of a single axis or all the axes as before. @@ -3411,15 +3368,10 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial : scalar, optional The starting value for this product. See `~numpy.ufunc.reduce` for details. - - .. versionadded:: 1.15.0 - where : array_like of bool, optional Elements to include in the product. See `~numpy.ufunc.reduce` for details. - .. versionadded:: 1.17.0 - Returns ------- product_along_axis : ndarray, see `dtype` parameter above. @@ -3799,8 +3751,6 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, *, Axis or axes along which the means are computed. The default is to compute the mean of the flattened array. - .. versionadded:: 1.7.0 - If this is a tuple of ints, a mean is performed over multiple axes, instead of a single axis or all the axes as before. dtype : data-type, optional @@ -3872,13 +3822,19 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, *, >>> a[0, :] = 1.0 >>> a[1, :] = 0.1 >>> np.mean(a) - 0.54999924 + np.float32(0.54999924) Computing the mean in float64 is more accurate: >>> np.mean(a, dtype=np.float64) 0.55000000074505806 # may vary + Computing the mean in timedelta64 is available: + + >>> b = np.array([1, 3], dtype="timedelta64[D]") + >>> np.mean(b) + np.timedelta64(2,'D') + Specifying a where argument: >>> a = np.array([[5, 9, 13], [14, 10, 12], [11, 15, 19]]) @@ -3927,9 +3883,6 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, axis : None or int or tuple of ints, optional Axis or axes along which the standard deviation is computed. The default is to compute the standard deviation of the flattened array. - - .. versionadded:: 1.7.0 - If this is a tuple of ints, a standard deviation is performed over multiple axes, instead of a single axis or all the axes as before. 
dtype : dtype, optional @@ -3967,7 +3920,7 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, The axis for the calculation of the mean should be the same as used in the call to this std function. - .. versionadded:: 1.26.0 + .. versionadded:: 2.0.0 correction : {int, float}, optional Array API compatible name for the ``ddof`` parameter. Only one of them @@ -4016,7 +3969,7 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, \sqrt{\frac{\sum_i{|a_i - \bar{a}|^2 }}{N - 1}} - In statistics, the resulting quantity is sometimed called the "sample + In statistics, the resulting quantity is sometimes called the "sample standard deviation" because if `a` is a random sample from a larger population, this calculation provides the square root of an unbiased estimate of the variance of the population. The use of :math:`N-1` in the @@ -4053,7 +4006,7 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, >>> a[0, :] = 1.0 >>> a[1, :] = 0.1 >>> np.std(a) - 0.45000005 + np.float32(0.45000005) Computing the standard deviation in float64 is more accurate: @@ -4135,9 +4088,6 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, axis : None or int or tuple of ints, optional Axis or axes along which the variance is computed. The default is to compute the variance of the flattened array. - - .. versionadded:: 1.7.0 - If this is a tuple of ints, a variance is performed over multiple axes, instead of a single axis or all the axes as before. dtype : data-type, optional @@ -4174,7 +4124,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, The axis for the calculation of the mean should be the same as used in the call to this var function. - .. versionadded:: 1.26.0 + .. versionadded:: 2.0.0 correction : {int, float}, optional Array API compatible name for the ``ddof`` parameter. Only one of them @@ -4221,7 +4171,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, \frac{\sum_i{|a_i - \bar{a}|^2}}{N - 1} - In statistics, the resulting quantity is sometimed called the "sample + In statistics, the resulting quantity is sometimes called the "sample variance" because if `a` is a random sample from a larger population, this calculation provides an unbiased estimate of the variance of the population. 
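Reviewer note: both `std` and `var` retag `correction` from 1.26.0 to 2.0.0, the release that actually shipped it. A sketch of how the alias relates to `ddof` (that passing both parameters raises a ValueError is current-release behavior, not something this hunk changes):

```python
import numpy as np

a = np.array([1.0, 2.0, 3.0, 4.0])
# `correction` is the array-API-compatible spelling of `ddof`; only one
# of the two may be supplied per call.
assert np.std(a, correction=1) == np.std(a, ddof=1)
assert np.var(a, correction=1) == np.var(a, ddof=1)
```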
The use of :math:`N-1` in the denominator is often called @@ -4256,7 +4206,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, >>> a[0, :] = 1.0 >>> a[1, :] = 0.1 >>> np.var(a) - 0.20250003 + np.float32(0.20250003) Computing the variance in float64 is more accurate: @@ -4317,4 +4267,3 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs) - diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 0d4e30ce8101..0465cc5aaa54 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -1,5 +1,16 @@ from collections.abc import Sequence -from typing import Any, overload, TypeVar, Literal, SupportsIndex +from typing import ( + Any, + Literal, + NoReturn, + Protocol, + SupportsIndex, + TypeAlias, + TypeVar, + overload, + type_check_only, +) +from typing_extensions import Never, deprecated import numpy as np from numpy import ( @@ -11,8 +22,10 @@ from numpy import ( float16, floating, complexfloating, + timedelta64, object_, generic, + _AnyShapeType, _OrderKACF, _OrderACF, _ModeKind, @@ -27,14 +40,15 @@ from numpy._typing import ( ArrayLike, _ArrayLike, NDArray, + _NestedSequence, _ShapeLike, - _Shape, _ArrayLikeBool_co, _ArrayLikeUInt_co, _ArrayLikeInt_co, _ArrayLikeFloat_co, _ArrayLikeComplex_co, _ArrayLikeObject_co, + _ArrayLikeTD64_co, _IntLike_co, _BoolLike_co, _ComplexLike_co, @@ -42,11 +56,71 @@ from numpy._typing import ( _ScalarLike_co, ) +__all__ = [ + "all", + "amax", + "amin", + "any", + "argmax", + "argmin", + "argpartition", + "argsort", + "around", + "choose", + "clip", + "compress", + "cumprod", + "cumsum", + "cumulative_prod", + "cumulative_sum", + "diagonal", + "mean", + "max", + "min", + "matrix_transpose", + "ndim", + "nonzero", + "partition", + "prod", + "ptp", + "put", + "ravel", + "repeat", + "reshape", + "resize", + "round", + "searchsorted", + "shape", + "size", + "sort", + "squeeze", + "std", + "sum", + "swapaxes", + "take", + "trace", + "transpose", + "var", +] + _SCT = TypeVar("_SCT", bound=generic) _SCT_uifcO = TypeVar("_SCT_uifcO", bound=number[Any] | object_) -_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) +_ArrayType = TypeVar("_ArrayType", bound=np.ndarray[Any, Any]) +_SizeType = TypeVar("_SizeType", bound=int) +_ShapeType = TypeVar("_ShapeType", bound=tuple[int, ...]) +_ShapeType_co = TypeVar("_ShapeType_co", bound=tuple[int, ...], covariant=True) + +@type_check_only +class _SupportsShape(Protocol[_ShapeType_co]): + # NOTE: it matters that `self` is positional only + @property + def shape(self, /) -> _ShapeType_co: ... -__all__: list[str] +# a "sequence" that isn't a string, bytes, bytearray, or memoryview +_T = TypeVar("_T") +_PyArray: TypeAlias = list[_T] | tuple[_T, ...] +# `int` also covers `bool` +_PyScalar: TypeAlias = int | float | complex | bytes | str @overload def take( @@ -90,18 +164,73 @@ def take( ) -> _ArrayType: ... @overload +def reshape( # shape: index + a: _ArrayLike[_SCT], + /, + shape: SupportsIndex, + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> np.ndarray[tuple[int], np.dtype[_SCT]]: ... +@overload +def reshape( # shape: (int, ...) @ _AnyShapeType + a: _ArrayLike[_SCT], + /, + shape: _AnyShapeType, + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> np.ndarray[_AnyShapeType, np.dtype[_SCT]]: ... 
+@overload # shape: Sequence[index] def reshape( a: _ArrayLike[_SCT], - newshape: _ShapeLike, - order: _OrderACF = ..., - copy: None | bool = ..., + /, + shape: Sequence[SupportsIndex], + order: _OrderACF = "C", + *, + copy: bool | None = None, ) -> NDArray[_SCT]: ... +@overload # shape: index +def reshape( + a: ArrayLike, + /, + shape: SupportsIndex, + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> np.ndarray[tuple[int], np.dtype[Any]]: ... @overload +def reshape( # shape: (int, ...) @ _AnyShapeType + a: ArrayLike, + /, + shape: _AnyShapeType, + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> np.ndarray[_AnyShapeType, np.dtype[Any]]: ... +@overload # shape: Sequence[index] def reshape( a: ArrayLike, + /, + shape: Sequence[SupportsIndex], + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> NDArray[Any]: ... +@overload +@deprecated( + "`newshape` keyword argument is deprecated, " + "use `shape=...` or pass shape positionally instead. " + "(deprecated in NumPy 2.1)", +) +def reshape( + a: ArrayLike, + /, + shape: None = None, + order: _OrderACF = "C", + *, newshape: _ShapeLike, - order: _OrderACF = ..., - copy: None | bool = ..., + copy: bool | None = None, ) -> NDArray[Any]: ... @overload @@ -300,16 +429,23 @@ def searchsorted( sorter: None | _ArrayLikeInt_co = ..., # 1D int array ) -> NDArray[intp]: ... +# unlike `reshape`, `resize` only accepts positive integers, so literal ints can be used @overload -def resize( - a: _ArrayLike[_SCT], - new_shape: _ShapeLike, -) -> NDArray[_SCT]: ... +def resize(a: _ArrayLike[_SCT], new_shape: _SizeType) -> np.ndarray[tuple[_SizeType], np.dtype[_SCT]]: ... @overload -def resize( - a: ArrayLike, - new_shape: _ShapeLike, -) -> NDArray[Any]: ... +def resize(a: _ArrayLike[_SCT], new_shape: SupportsIndex) -> np.ndarray[tuple[int], np.dtype[_SCT]]: ... +@overload +def resize(a: _ArrayLike[_SCT], new_shape: _ShapeType) -> np.ndarray[_ShapeType, np.dtype[_SCT]]: ... +@overload +def resize(a: _ArrayLike[_SCT], new_shape: Sequence[SupportsIndex]) -> NDArray[_SCT]: ... +@overload +def resize(a: ArrayLike, new_shape: _SizeType) -> np.ndarray[tuple[_SizeType], np.dtype[Any]]: ... +@overload +def resize(a: ArrayLike, new_shape: SupportsIndex) -> np.ndarray[tuple[int], np.dtype[Any]]: ... +@overload +def resize(a: ArrayLike, new_shape: _ShapeType) -> np.ndarray[_ShapeType, np.dtype[Any]]: ... +@overload +def resize(a: ArrayLike, new_shape: Sequence[SupportsIndex]) -> NDArray[Any]: ... @overload def squeeze( @@ -361,14 +497,51 @@ def trace( out: _ArrayType = ..., ) -> _ArrayType: ... +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_SCT]] + +@overload +def ravel(a: _ArrayLike[_SCT], order: _OrderKACF = "C") -> _Array1D[_SCT]: ... @overload -def ravel(a: _ArrayLike[_SCT], order: _OrderKACF = ...) -> NDArray[_SCT]: ... +def ravel(a: bytes | _NestedSequence[bytes], order: _OrderKACF = "C") -> _Array1D[np.bytes_]: ... @overload -def ravel(a: ArrayLike, order: _OrderKACF = ...) -> NDArray[Any]: ... +def ravel(a: str | _NestedSequence[str], order: _OrderKACF = "C") -> _Array1D[np.str_]: ... +@overload +def ravel(a: bool | _NestedSequence[bool], order: _OrderKACF = "C") -> _Array1D[np.bool]: ... +@overload +def ravel(a: int | _NestedSequence[int], order: _OrderKACF = "C") -> _Array1D[np.int_ | np.bool]: ... +@overload +def ravel(a: float | _NestedSequence[float], order: _OrderKACF = "C") -> _Array1D[np.float64 | np.int_ | np.bool]: ... 
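Reviewer note: the new `@deprecated` overload surfaces in the stubs the warning the runtime has emitted since 2.1. A sketch of the three spellings:

```python
import numpy as np

a = np.arange(6)
b = np.reshape(a, (2, 3))          # positional shape — preferred
c = np.reshape(a, shape=(2, 3))    # keyword `shape` — equivalent
# np.reshape(a, newshape=(2, 3))   # still runs, but emits DeprecationWarning,
#                                  # and type checkers now flag it as deprecated
assert b.shape == c.shape == (2, 3)
```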
+@overload
+def ravel(
+    a: complex | _NestedSequence[complex],
+    order: _OrderKACF = "C",
+) -> _Array1D[np.complex128 | np.float64 | np.int_ | np.bool]: ...
+@overload
+def ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[tuple[int], np.dtype[Any]]: ...
 
-def nonzero(a: ArrayLike) -> tuple[NDArray[intp], ...]: ...
+@overload
+def nonzero(a: np.generic | np.ndarray[tuple[()], Any]) -> NoReturn: ...
+@overload
+def nonzero(a: _ArrayLike[Any]) -> tuple[NDArray[intp], ...]: ...
 
-def shape(a: ArrayLike) -> _Shape: ...
+# this prevents `Any` from being returned with Pyright
+@overload
+def shape(a: _SupportsShape[Never]) -> tuple[int, ...]: ...
+@overload
+def shape(a: _SupportsShape[_ShapeType]) -> _ShapeType: ...
+@overload
+def shape(a: _PyScalar) -> tuple[()]: ...
+# `collections.abc.Sequence` can't be used here, since `bytes` and `str` are
+# subtypes of it, which would make the return types incompatible.
+@overload
+def shape(a: _PyArray[_PyScalar]) -> tuple[int]: ...
+@overload
+def shape(a: _PyArray[_PyArray[_PyScalar]]) -> tuple[int, int]: ...
+# this overload will be skipped by typecheckers that don't support PEP 688
+@overload
+def shape(a: memoryview | bytearray) -> tuple[int]: ...
+@overload
+def shape(a: ArrayLike) -> tuple[int, ...]: ...
 
 @overload
 def compress(
@@ -489,6 +662,16 @@ def clip(
     casting: _CastingKind = ...,
 ) -> _ArrayType: ...
 
+@overload
+def sum(
+    a: _ArrayLike[_SCT],
+    axis: None = ...,
+    dtype: None = ...,
+    out: None = ...,
+    keepdims: Literal[False] = ...,
+    initial: _NumberLike_co = ...,
+    where: _ArrayLikeBool_co = ...,
+) -> _SCT: ...
 @overload
 def sum(
     a: _ArrayLike[_SCT],
@@ -498,8 +681,50 @@ def sum(
     keepdims: bool = ...,
     initial: _NumberLike_co = ...,
     where: _ArrayLikeBool_co = ...,
+) -> _SCT | NDArray[_SCT]: ...
+@overload
+def sum(
+    a: ArrayLike,
+    axis: None,
+    dtype: _DTypeLike[_SCT],
+    out: None = ...,
+    keepdims: Literal[False] = ...,
+    initial: _NumberLike_co = ...,
+    where: _ArrayLikeBool_co = ...,
 ) -> _SCT: ...
 @overload
+def sum(
+    a: ArrayLike,
+    axis: None = ...,
+    *,
+    dtype: _DTypeLike[_SCT],
+    out: None = ...,
+    keepdims: Literal[False] = ...,
+    initial: _NumberLike_co = ...,
+    where: _ArrayLikeBool_co = ...,
+) -> _SCT: ...
+@overload
+def sum(
+    a: ArrayLike,
+    axis: None | _ShapeLike,
+    dtype: _DTypeLike[_SCT],
+    out: None = ...,
+    keepdims: bool = ...,
+    initial: _NumberLike_co = ...,
+    where: _ArrayLikeBool_co = ...,
+) -> _SCT | NDArray[_SCT]: ...
+@overload
+def sum(
+    a: ArrayLike,
+    axis: None | _ShapeLike = ...,
+    *,
+    dtype: _DTypeLike[_SCT],
+    out: None = ...,
+    keepdims: bool = ...,
+    initial: _NumberLike_co = ...,
+    where: _ArrayLikeBool_co = ...,
+) -> _SCT | NDArray[_SCT]: ...
+@overload
 def sum(
     a: ArrayLike,
     axis: None | _ShapeLike = ...,
@@ -523,57 +748,75 @@
 
 @overload
 def all(
     a: ArrayLike,
-    axis: None = ...,
-    out: None = ...,
-    keepdims: Literal[False] = ...,
+    axis: None = None,
+    out: None = None,
+    keepdims: Literal[False, 0] = False,
     *,
-    where: _ArrayLikeBool_co = ...,
+    where: _ArrayLikeBool_co = True,
 ) -> np.bool: ...
 @overload
 def all(
     a: ArrayLike,
-    axis: None | _ShapeLike = ...,
-    out: None = ...,
-    keepdims: bool = ...,
+    axis: None | int | tuple[int, ...] = None,
+    out: None = None,
+    keepdims: SupportsIndex = False,
     *,
-    where: _ArrayLikeBool_co = ...,
-) -> Any: ...
+    where: _ArrayLikeBool_co = True,
+) -> np.bool | NDArray[np.bool]: ...
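Reviewer note: the `shape` overloads above give type checkers precise tuple types for plain Python inputs; the runtime values they describe are unchanged:

```python
import numpy as np

print(np.shape(0.0))               # ()   — a scalar is zero-dimensional
print(np.shape([1, 2, 3]))         # (3,)
print(np.shape([[1, 2], [3, 4]]))  # (2, 2)
print(np.shape(b"abc"))            # ()   — bytes/str count as scalars, hence
                                   #        the _PyScalar/_PyArray split
```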
@overload def all( a: ArrayLike, - axis: None | _ShapeLike = ..., - out: _ArrayType = ..., - keepdims: bool = ..., + axis: None | int | tuple[int, ...], + out: _ArrayType, + keepdims: SupportsIndex = False, *, - where: _ArrayLikeBool_co = ..., + where: _ArrayLikeBool_co = True, +) -> _ArrayType: ... +@overload +def all( + a: ArrayLike, + axis: None | int | tuple[int, ...] = None, + *, + out: _ArrayType, + keepdims: SupportsIndex = False, + where: _ArrayLikeBool_co = True, ) -> _ArrayType: ... @overload def any( a: ArrayLike, - axis: None = ..., - out: None = ..., - keepdims: Literal[False] = ..., + axis: None = None, + out: None = None, + keepdims: Literal[False, 0] = False, *, - where: _ArrayLikeBool_co = ..., + where: _ArrayLikeBool_co = True, ) -> np.bool: ... @overload def any( a: ArrayLike, - axis: None | _ShapeLike = ..., - out: None = ..., - keepdims: bool = ..., + axis: None | int | tuple[int, ...] = None, + out: None = None, + keepdims: SupportsIndex = False, *, - where: _ArrayLikeBool_co = ..., -) -> Any: ... + where: _ArrayLikeBool_co = True, +) -> np.bool | NDArray[np.bool]: ... @overload def any( a: ArrayLike, - axis: None | _ShapeLike = ..., - out: _ArrayType = ..., - keepdims: bool = ..., + axis: None | int | tuple[int, ...], + out: _ArrayType, + keepdims: SupportsIndex = False, *, - where: _ArrayLikeBool_co = ..., + where: _ArrayLikeBool_co = True, +) -> _ArrayType: ... +@overload +def any( + a: ArrayLike, + axis: None | int | tuple[int, ...] = None, + *, + out: _ArrayType, + keepdims: SupportsIndex = False, + where: _ArrayLikeBool_co = True, ) -> _ArrayType: ... @overload @@ -1062,6 +1305,16 @@ def mean( where: _ArrayLikeBool_co = ..., ) -> complexfloating[Any, Any]: ... @overload +def mean( + a: _ArrayLikeTD64_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + *, + where: _ArrayLikeBool_co = ..., +) -> timedelta64: ... +@overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None | _ShapeLike = ..., @@ -1082,6 +1335,26 @@ def mean( where: _ArrayLikeBool_co = ..., ) -> _SCT: ... @overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None, + dtype: _DTypeLike[_SCT], + out: None = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co = ..., +) -> _SCT | NDArray[_SCT]: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None = ..., + *, + dtype: _DTypeLike[_SCT], + out: None = ..., + keepdims: bool = ..., + where: _ArrayLikeBool_co = ..., +) -> _SCT | NDArray[_SCT]: ... +@overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None | _ShapeLike = ..., diff --git a/numpy/_core/function_base.py b/numpy/_core/function_base.py index 0e98196f2922..cba071768ab7 100644 --- a/numpy/_core/function_base.py +++ b/numpy/_core/function_base.py @@ -33,9 +33,6 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, The endpoint of the interval can optionally be excluded. - .. versionchanged:: 1.16.0 - Non-scalar `start` and `stop` are now supported. - .. versionchanged:: 1.20.0 Values are rounded towards ``-inf`` instead of ``0`` when an integer ``dtype`` is specified. The old behavior can @@ -63,14 +60,10 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, is inferred from `start` and `stop`. The inferred dtype will never be an integer; `float` is chosen even if the arguments would produce an array of integers. - - .. 
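Reviewer note: two runtime facts the reworked stubs encode — the scalar case of `any`/`all` returns `np.bool` rather than a Python `bool`, and `mean` accepts `timedelta64` input (the overload added at the end of this hunk). A sketch:

```python
import numpy as np

x = np.array([[True, False], [True, True]])
print(repr(np.any(x)))    # np.True_ — a NumPy bool scalar, matching the np.bool return type
print(np.all(x, axis=0))  # [ True False]

d = np.array([1, 3], dtype="timedelta64[D]")
print(np.mean(d))         # 2 days — the timedelta64 overload for mean
```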
versionadded:: 1.9.0 axis : int, optional The axis in the result to store the samples. Relevant only if start or stop are array-like. By default (0), the samples will be along a new axis inserted at the beginning. Use -1 to get an axis at the end. - - .. versionadded:: 1.16.0 device : str, optional The device on which to place the created array. Default: None. For Array-API interoperability only, so must be ``"cpu"`` if passed. @@ -209,9 +202,6 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, (`base` to the power of `start`) and ends with ``base ** stop`` (see `endpoint` below). - .. versionchanged:: 1.16.0 - Non-scalar `start` and `stop` are now supported. - .. versionchanged:: 1.25.0 Non-scalar 'base` is now supported @@ -244,9 +234,6 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, along a new axis inserted at the beginning. Use -1 to get an axis at the end. - .. versionadded:: 1.16.0 - - Returns ------- samples : ndarray @@ -328,9 +315,6 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): This is similar to `logspace`, but with endpoints specified directly. Each output sample is a constant multiple of the previous. - .. versionchanged:: 1.16.0 - Non-scalar `start` and `stop` are now supported. - Parameters ---------- start : array_like @@ -355,8 +339,6 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): or stop are array-like. By default (0), the samples will be along a new axis inserted at the beginning. Use -1 to get an axis at the end. - .. versionadded:: 1.16.0 - Returns ------- samples : ndarray @@ -551,6 +533,8 @@ def add_newdoc(place, obj, doc, warn_on_python=True): """ new = getattr(__import__(place, globals(), {}, [obj]), obj) if isinstance(doc, str): + if "${ARRAY_FUNCTION_LIKE}" in doc: + doc = overrides.get_array_function_like_doc(new, doc) _add_docstring(new, doc.strip(), warn_on_python) elif isinstance(doc, tuple): attr, docstring = doc diff --git a/numpy/_core/function_base.pyi b/numpy/_core/function_base.pyi index 59c3d6b4ea2c..1d7ea3a2792e 100644 --- a/numpy/_core/function_base.pyi +++ b/numpy/_core/function_base.pyi @@ -15,9 +15,9 @@ from numpy._typing import ( _ArrayLikeComplex_co, ) -_SCT = TypeVar("_SCT", bound=generic) +__all__ = ["logspace", "linspace", "geomspace"] -__all__: list[str] +_SCT = TypeVar("_SCT", bound=generic) @overload def linspace( diff --git a/numpy/_core/getlimits.py b/numpy/_core/getlimits.py index 669dfc71e298..3ceb8139ee70 100644 --- a/numpy/_core/getlimits.py +++ b/numpy/_core/getlimits.py @@ -3,6 +3,7 @@ """ __all__ = ['finfo', 'iinfo'] +import types import warnings from .._utils import set_module @@ -487,6 +488,8 @@ class finfo: _finfo_cache = {} + __class_getitem__ = classmethod(types.GenericAlias) + def __new__(cls, dtype): try: obj = cls._finfo_cache.get(dtype) # most common path @@ -689,6 +692,8 @@ class iinfo: _min_vals = {} _max_vals = {} + __class_getitem__ = classmethod(types.GenericAlias) + def __init__(self, int_type): try: self.dtype = numeric.dtype(int_type) diff --git a/numpy/_core/getlimits.pyi b/numpy/_core/getlimits.pyi index da5e3c23ea72..9d79b178f4dc 100644 --- a/numpy/_core/getlimits.pyi +++ b/numpy/_core/getlimits.pyi @@ -1,6 +1,3 @@ -from numpy import ( - finfo as finfo, - iinfo as iinfo, -) +from numpy import finfo, iinfo -__all__: list[str] +__all__ = ["finfo", "iinfo"] diff --git a/numpy/_core/include/numpy/dtype_api.h b/numpy/_core/include/numpy/dtype_api.h index 9dd3effa3a80..b37c9fbb6821 100644 --- 
a/numpy/_core/include/numpy/dtype_api.h +++ b/numpy/_core/include/numpy/dtype_api.h @@ -268,7 +268,8 @@ typedef int (PyArrayMethod_TranslateGivenDescriptors)(int nin, int nout, * * The function must clean up on error. * - * @param nargs Number of arguments + * @param nin Number of input arguments + * @param nout Number of output arguments * @param new_dtypes The DTypes of the output (usually probably not needed) * @param given_descrs Original given_descrs to the resolver, necessary to * fetch any information related to the new dtypes from the original. diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h index 573f26938d87..37788a74557f 100644 --- a/numpy/_core/include/numpy/ndarraytypes.h +++ b/numpy/_core/include/numpy/ndarraytypes.h @@ -6,6 +6,10 @@ #include "npy_cpu.h" #include "utils.h" +#ifdef __cplusplus +extern "C" { +#endif + #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN /* Always allow threading unless it was explicitly disabled at build time */ @@ -841,7 +845,7 @@ typedef struct { npy_int32 month, day, hour, min, sec, us, ps, as; } npy_datetimestruct; -/* This is not used internally. */ +/* This structure contains an exploded view of a timedelta value */ typedef struct { npy_int64 day; npy_int32 sec, us, ps, as; @@ -1922,4 +1926,8 @@ typedef struct { */ #undef NPY_DEPRECATED_INCLUDES +#ifdef __cplusplus +} +#endif + #endif /* NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ */ diff --git a/numpy/_core/include/numpy/npy_2_compat.h b/numpy/_core/include/numpy/npy_2_compat.h index 80bb4088c812..e39e65aedea7 100644 --- a/numpy/_core/include/numpy/npy_2_compat.h +++ b/numpy/_core/include/numpy/npy_2_compat.h @@ -125,7 +125,7 @@ PyArray_ImportNumPyAPI(void) #define NPY_DEFAULT_INT \ (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? NPY_INTP : NPY_LONG) #define NPY_RAVEL_AXIS \ - (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? -1 : 32) + (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? NPY_MIN_INT : 32) #define NPY_MAXARGS \ (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? 
64 : 32) #endif diff --git a/numpy/_core/include/numpy/npy_common.h b/numpy/_core/include/numpy/npy_common.h index 3132b602a7c8..79ad8ad78cb2 100644 --- a/numpy/_core/include/numpy/npy_common.h +++ b/numpy/_core/include/numpy/npy_common.h @@ -379,11 +379,6 @@ typedef struct #include -// Downstream libraries like sympy would like to use I -// see https://github.com/numpy/numpy/issues/26787 -#ifdef I -#undef I -#endif #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) typedef _Dcomplex npy_cdouble; diff --git a/numpy/_core/include/numpy/npy_cpu.h b/numpy/_core/include/numpy/npy_cpu.h index a19f8e6bbdd9..15f9f12931c8 100644 --- a/numpy/_core/include/numpy/npy_cpu.h +++ b/numpy/_core/include/numpy/npy_cpu.h @@ -18,6 +18,7 @@ * NPY_CPU_ARCEL * NPY_CPU_ARCEB * NPY_CPU_RISCV64 + * NPY_CPU_RISCV32 * NPY_CPU_LOONGARCH * NPY_CPU_WASM */ @@ -102,8 +103,12 @@ #define NPY_CPU_ARCEL #elif defined(__arc__) && defined(__BIG_ENDIAN__) #define NPY_CPU_ARCEB -#elif defined(__riscv) && defined(__riscv_xlen) && __riscv_xlen == 64 - #define NPY_CPU_RISCV64 +#elif defined(__riscv) + #if __riscv_xlen == 64 + #define NPY_CPU_RISCV64 + #elif __riscv_xlen == 32 + #define NPY_CPU_RISCV32 + #endif #elif defined(__loongarch__) #define NPY_CPU_LOONGARCH #elif defined(__EMSCRIPTEN__) diff --git a/numpy/_core/include/numpy/npy_endian.h b/numpy/_core/include/numpy/npy_endian.h index 5e58a7f52cee..09262120bf82 100644 --- a/numpy/_core/include/numpy/npy_endian.h +++ b/numpy/_core/include/numpy/npy_endian.h @@ -49,6 +49,7 @@ || defined(NPY_CPU_PPC64LE) \ || defined(NPY_CPU_ARCEL) \ || defined(NPY_CPU_RISCV64) \ + || defined(NPY_CPU_RISCV32) \ || defined(NPY_CPU_LOONGARCH) \ || defined(NPY_CPU_WASM) #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN diff --git a/numpy/_core/include/numpy/numpyconfig.h b/numpy/_core/include/numpy/numpyconfig.h index b49d215614ac..46ecade41ada 100644 --- a/numpy/_core/include/numpy/numpyconfig.h +++ b/numpy/_core/include/numpy/numpyconfig.h @@ -121,8 +121,8 @@ /* user provided a target version, use it */ #define NPY_FEATURE_VERSION NPY_TARGET_VERSION #else - /* Use the default (increase when dropping Python 3.9 support) */ - #define NPY_FEATURE_VERSION NPY_1_19_API_VERSION + /* Use the default (increase when dropping Python 3.10 support) */ + #define NPY_FEATURE_VERSION NPY_1_21_API_VERSION #endif /* Sanity check the (requested) feature version */ @@ -130,7 +130,14 @@ #error "NPY_TARGET_VERSION higher than NumPy headers!" #elif NPY_FEATURE_VERSION < NPY_1_15_API_VERSION /* No support for irrelevant old targets, no need for error, but warn. */ - #warning "Requested NumPy target lower than supported NumPy 1.15." + #ifndef _MSC_VER + #warning "Requested NumPy target lower than supported NumPy 1.15." + #else + #define _WARN___STR2__(x) #x + #define _WARN___STR1__(x) _WARN___STR2__(x) + #define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: " + #pragma message(_WARN___LOC__"Requested NumPy target lower than supported NumPy 1.15.") + #endif #endif /* diff --git a/numpy/_core/include/numpy/ufuncobject.h b/numpy/_core/include/numpy/ufuncobject.h index ada23626f70b..169a93eb5597 100644 --- a/numpy/_core/include/numpy/ufuncobject.h +++ b/numpy/_core/include/numpy/ufuncobject.h @@ -170,8 +170,10 @@ typedef struct _tagPyUFuncObject { * with the dtypes for the inputs and outputs. 
*/ PyUFunc_TypeResolutionFunc *type_resolver; - /* Was the legacy loop resolver */ - void *reserved2; + + /* A dictionary to monkeypatch ufuncs */ + PyObject *dict; + /* * This was blocked off to be the "new" inner loop selector in 1.7, * but this was never implemented. (This is also why the above diff --git a/numpy/_core/memmap.py b/numpy/_core/memmap.py index 268b23dbadf9..a5fa10c0e036 100644 --- a/numpy/_core/memmap.py +++ b/numpy/_core/memmap.py @@ -84,7 +84,7 @@ class memmap(ndarray): .. versionchanged:: 2.0 The shape parameter can now be any integer sequence type, previously types were limited to tuple and int. - + order : {'C', 'F'}, optional Specify the order of the ndarray memory layout: :term:`row-major`, C-style or :term:`column-major`, @@ -262,10 +262,14 @@ def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, bytes = int(offset + size*_dbytes) - if mode in ('w+', 'r+') and flen < bytes: - fid.seek(bytes - 1, 0) - fid.write(b'\0') - fid.flush() + if mode in ('w+', 'r+'): + # gh-27723 + # if bytes == 0, we write out 1 byte to allow empty memmap. + bytes = max(bytes, 1) + if flen < bytes: + fid.seek(bytes - 1, 0) + fid.write(b'\0') + fid.flush() if mode == 'c': acc = mmap.ACCESS_COPY @@ -276,6 +280,11 @@ def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, start = offset - offset % mmap.ALLOCATIONGRANULARITY bytes -= start + # bytes == 0 is problematic as in mmap length=0 maps the full file. + # See PR gh-27723 for a more detailed explanation. + if bytes == 0 and start > 0: + bytes += mmap.ALLOCATIONGRANULARITY + start -= mmap.ALLOCATIONGRANULARITY array_offset = offset - start mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start) diff --git a/numpy/_core/memmap.pyi b/numpy/_core/memmap.pyi index 03c6b772dcd5..0b31328404fb 100644 --- a/numpy/_core/memmap.pyi +++ b/numpy/_core/memmap.pyi @@ -1,3 +1,3 @@ -from numpy import memmap as memmap +from numpy import memmap -__all__: list[str] +__all__ = ["memmap"] diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index dbf1a144ed93..d32d71adc5dd 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -245,8 +245,8 @@ endforeach # variable attributes tested via "int %s a" % attribute optional_variable_attributes = [ - ['thread_local', 'HAVE_THREAD_LOCAL'], - ['_Thread_local', 'HAVE__THREAD_LOCAL'], + ['thread_local', 'HAVE_THREAD_LOCAL'], # C23 + ['_Thread_local', 'HAVE__THREAD_LOCAL'], # C11/C17 ['__thread', 'HAVE__THREAD'], ['__declspec(thread)', 'HAVE___DECLSPEC_THREAD_'] ] @@ -681,7 +681,6 @@ c_args_common = [ # Same as NPY_CXX_FLAGS (TODO: extend for what ccompiler_opt adds) cpp_args_common = c_args_common + [ - '-D__STDC_VERSION__=0', # for compatibility with C headers ] if cc.get_argument_syntax() != 'msvc' cpp_args_common += [ @@ -714,7 +713,7 @@ py.extension_module('_multiarray_tests', src_file.process('src/multiarray/_multiarray_tests.c.src'), 'src/common/mem_overlap.c', 'src/common/npy_argparse.c', - 'src/common/npy_hashtable.c', + 'src/common/npy_hashtable.cpp', src_file.process('src/common/templ_common.h.src') ], c_args: c_args_common, @@ -829,7 +828,7 @@ foreach gen_mtargets : [ 'highway_qsort.dispatch.h', 'src/npysort/highway_qsort.dispatch.cpp', use_highway ? 
[ - SVE, ASIMD, VSX2, # FIXME: disable VXE due to runtime segfault + ASIMD, VSX2, # FIXME: disable VXE due to runtime segfault ] : [] ], [ @@ -952,7 +951,9 @@ foreach gen_mtargets : [ 'src/umath/loops_trigonometric.dispatch.cpp', [ AVX512_SKX, [AVX2, FMA3], + VSX4, VSX3, VSX2, NEON_VFPV4, + VXE2, VXE, ] ], [ @@ -1041,7 +1042,7 @@ src_multiarray_umath_common = [ 'src/common/gil_utils.c', 'src/common/mem_overlap.c', 'src/common/npy_argparse.c', - 'src/common/npy_hashtable.c', + 'src/common/npy_hashtable.cpp', 'src/common/npy_import.c', 'src/common/npy_longdouble.c', 'src/common/ucsnarrow.c', @@ -1152,7 +1153,7 @@ src_umath = umath_gen_headers + [ 'src/umath/ufunc_type_resolution.c', 'src/umath/clip.cpp', 'src/umath/clip.h', - 'src/umath/dispatching.c', + 'src/umath/dispatching.cpp', 'src/umath/extobj.c', 'src/umath/legacy_array_method.c', 'src/umath/override.c', @@ -1213,7 +1214,8 @@ py.extension_module('_multiarray_umath', src_numpy_api[1], # __multiarray_api.h src_umath_doc_h, npy_math_internal_h, - ] + svml_objects, + ], + objects: svml_objects, c_args: c_args_common, cpp_args: cpp_args_common, include_directories: [ diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index e2ca115b3728..088de1073e7e 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -1,7 +1,7 @@ """ -Create the numpy._core.multiarray namespace for backward compatibility. -In v1.16 the multiarray and umath c-extension modules were merged into -a single _multiarray_umath extension module. So we replicate the old +Create the numpy._core.multiarray namespace for backward compatibility. +In v1.16 the multiarray and umath c-extension modules were merged into +a single _multiarray_umath extension module. So we replicate the old namespace by importing from the extension module. 
""" @@ -17,7 +17,6 @@ _flagdict, from_dlpack, _place, _reconstruct, _vec_string, _ARRAY_API, _monotonicity, _get_ndarray_c_version, _get_madvise_hugepage, _set_madvise_hugepage, - _get_promotion_state, _set_promotion_state ) __all__ = [ @@ -40,10 +39,9 @@ 'normalize_axis_index', 'packbits', 'promote_types', 'putmask', 'ravel_multi_index', 'result_type', 'scalar', 'set_datetimeparse_function', 'set_typeDict', 'shares_memory', 'typeinfo', - 'unpackbits', 'unravel_index', 'vdot', 'where', 'zeros', - '_get_promotion_state', '_set_promotion_state'] + 'unpackbits', 'unravel_index', 'vdot', 'where', 'zeros'] -# For backward compatibility, make sure pickle imports +# For backward compatibility, make sure pickle imports # these functions from here _reconstruct.__module__ = 'numpy._core.multiarray' scalar.__module__ = 'numpy._core.multiarray' @@ -67,9 +65,36 @@ nested_iters.__module__ = 'numpy' promote_types.__module__ = 'numpy' zeros.__module__ = 'numpy' -_get_promotion_state.__module__ = 'numpy' -_set_promotion_state.__module__ = 'numpy' normalize_axis_index.__module__ = 'numpy.lib.array_utils' +add_docstring.__module__ = 'numpy.lib' +compare_chararrays.__module__ = 'numpy.char' + + +def _override___module__(): + namespace_names = globals() + for ufunc_name in [ + 'absolute', 'arccos', 'arccosh', 'add', 'arcsin', 'arcsinh', 'arctan', + 'arctan2', 'arctanh', 'bitwise_and', 'bitwise_count', 'invert', + 'left_shift', 'bitwise_or', 'right_shift', 'bitwise_xor', 'cbrt', + 'ceil', 'conjugate', 'copysign', 'cos', 'cosh', 'deg2rad', 'degrees', + 'divide', 'divmod', 'equal', 'exp', 'exp2', 'expm1', 'fabs', + 'float_power', 'floor', 'floor_divide', 'fmax', 'fmin', 'fmod', + 'frexp', 'gcd', 'greater', 'greater_equal', 'heaviside', 'hypot', + 'isfinite', 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', 'less', + 'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp', + 'logaddexp2', 'logical_and', 'logical_not', 'logical_or', + 'logical_xor', 'matmul', 'matvec', 'maximum', 'minimum', 'remainder', + 'modf', 'multiply', 'negative', 'nextafter', 'not_equal', 'positive', + 'power', 'rad2deg', 'radians', 'reciprocal', 'rint', 'sign', 'signbit', + 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan', 'tanh', + 'trunc', 'vecdot', 'vecmat', + ]: + ufunc = namespace_names[ufunc_name] + ufunc.__module__ = "numpy" + ufunc.__qualname__ = ufunc_name + + +_override___module__() # We can't verify dispatcher signatures because NumPy's C functions don't @@ -96,15 +121,11 @@ def empty_like( of the returned array. dtype : data-type, optional Overrides the data type of the result. - - .. versionadded:: 1.6.0 order : {'C', 'F', 'A', or 'K'}, optional Overrides the memory layout of the result. 'C' means C-order, 'F' means F-order, 'A' means 'F' if `prototype` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `prototype` as closely as possible. - - .. versionadded:: 1.6.0 subok : bool, optional. If True, then the newly created array will use the sub-class type of `prototype`, otherwise it will be a base-class array. Defaults @@ -113,8 +134,6 @@ def empty_like( Overrides the shape of the result. If order='K' and the number of dimensions is unchanged, will try to keep order, otherwise, order='C' is implied. - - .. versionadded:: 1.17.0 device : str, optional The device on which to place the created array. Default: None. For Array-API interoperability only, so must be ``"cpu"`` if passed. 
@@ -162,10 +181,10 @@ def empty_like( def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None): """ concatenate( - (a1, a2, ...), - axis=0, - out=None, - dtype=None, + (a1, a2, ...), + axis=0, + out=None, + dtype=None, casting="same_kind" ) @@ -192,7 +211,7 @@ def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None): casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur. Defaults to 'same_kind'. For a description of the options, please see :term:`casting`. - + .. versionadded:: 1.20.0 Returns @@ -300,6 +319,7 @@ def inner(a, b): -------- tensordot : Sum products over arbitrary axes. dot : Generalised matrix product, using second last dimension of `b`. + vecdot : Vector dot product of two arrays. einsum : Einstein summation convention. Notes @@ -588,16 +608,6 @@ def can_cast(from_, to, casting=None): Notes ----- - .. versionchanged:: 1.17.0 - Casting between a simple data type and a structured one is possible only - for "unsafe" casting. Casting to multiple fields is allowed, but - casting from multiple fields is not. - - .. versionchanged:: 1.9.0 - Casting from numeric to string types in 'safe' casting mode requires - that the string dtype length is long enough to store the maximum - integer/float value converted. - .. versionchanged:: 2.0 This function does not support Python scalars anymore and does not apply any value-based logic for 0-D arrays and NumPy scalars. @@ -651,10 +661,6 @@ def min_scalar_type(a): out : dtype The minimal data type. - Notes - ----- - .. versionadded:: 1.6.0 - See Also -------- result_type, promote_types, dtype, can_cast @@ -717,8 +723,6 @@ def result_type(*arrays_and_dtypes): Notes ----- - .. versionadded:: 1.6.0 - The specific algorithm used is as follows. Categories are determined by first checking which of boolean, @@ -813,6 +817,7 @@ def dot(a, b, out=None): See Also -------- vdot : Complex-conjugating dot product. + vecdot : Vector dot product of two arrays. tensordot : Sum products over arbitrary axes. einsum : Einstein summation convention. matmul : '@' operator as method with out parameter. @@ -850,18 +855,22 @@ def dot(a, b, out=None): @array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot) def vdot(a, b): - """ + r""" vdot(a, b, /) Return the dot product of two vectors. - The vdot(`a`, `b`) function handles complex numbers differently than - dot(`a`, `b`). If the first argument is complex the complex conjugate - of the first argument is used for the calculation of the dot product. + The `vdot` function handles complex numbers differently than `dot`: + if the first argument is complex, it is replaced by its complex conjugate + in the dot product calculation. `vdot` also handles multidimensional + arrays differently than `dot`: it does not perform a matrix product, but + flattens the arguments to 1-D arrays before taking a vector dot product. - Note that `vdot` handles multidimensional arrays differently than `dot`: - it does *not* perform a matrix product, but flattens input arguments - to 1-D vectors first. Consequently, it should only be used for vectors. + Consequently, when the arguments are 2-D arrays of the same shape, this + function effectively returns their + `Frobenius inner product `_ + (also known as the *trace inner product* or the *standard inner product* + on a vector space of matrices). 
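The Frobenius-inner-product remark in the rewritten `vdot` docstring is easy to verify numerically; a minimal sketch, assuming nothing beyond standard `np.vdot` and `np.trace` semantics:

>>> import numpy as np
>>> a = np.array([[1., 2.], [3., 4.]])
>>> b = np.array([[5., 6.], [7., 8.]])
>>> float(np.vdot(a, b))            # flatten both, then 1-D dot product
70.0
>>> float(np.trace(a.conj().T @ b))  # Frobenius inner product of a and b
70.0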
Parameters ---------- @@ -903,7 +912,7 @@ def vdot(a, b): >>> 1*4 + 4*1 + 5*2 + 6*2 30 - """ + """ # noqa: E501 return (a, b) @@ -932,8 +941,6 @@ def bincount(x, weights=None, minlength=None): minlength : int, optional A minimum number of bins for the output array. - .. versionadded:: 1.6.0 - Returns ------- out : ndarray of ints @@ -1024,10 +1031,6 @@ def ravel_multi_index(multi_index, dims, mode=None, order=None): -------- unravel_index - Notes - ----- - .. versionadded:: 1.6.0 - Examples -------- >>> import numpy as np @@ -1063,16 +1066,10 @@ def unravel_index(indices, shape=None, order=None): this function accepted just one index value. shape : tuple of ints The shape of the array to use for unraveling ``indices``. - - .. versionchanged:: 1.16.0 - Renamed from ``dims`` to ``shape``. - order : {'C', 'F'}, optional Determines whether the indices should be viewed as indexing in row-major (C-style) or column-major (Fortran-style) order. - .. versionadded:: 1.6.0 - Returns ------- unraveled_coords : tuple of ndarray @@ -1108,8 +1105,6 @@ def copyto(dst, src, casting=None, where=None): Raises a TypeError if the `casting` rule is violated, and if `where` is provided, it selects which elements to copy. - .. versionadded:: 1.7.0 - Parameters ---------- dst : ndarray @@ -1219,8 +1214,6 @@ def packbits(a, axis=None, bitorder='big'): reverse the order so ``[1, 1, 0, 0, 0, 0, 0, 0] => 3``. Defaults to 'big'. - .. versionadded:: 1.17.0 - Returns ------- packed : ndarray @@ -1283,17 +1276,12 @@ def unpackbits(a, axis=None, count=None, bitorder='big'): default). Counts larger than the available number of bits will add zero padding to the output. Negative counts must not exceed the available number of bits. - - .. versionadded:: 1.17.0 - bitorder : {'big', 'little'}, optional The order of the returned bits. 'big' will mimic bin(val), ``3 = 0b00000011 => [0, 0, 0, 0, 0, 0, 1, 1]``, 'little' will reverse the order to ``[1, 1, 0, 0, 0, 0, 0, 0]``. Defaults to 'big'. - .. versionadded:: 1.17.0 - Returns ------- unpacked : ndarray, uint8 type @@ -1465,17 +1453,15 @@ def may_share_memory(a, b, max_work=None): def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None): """ is_busday( - dates, - weekmask='1111100', - holidays=None, - busdaycal=None, + dates, + weekmask='1111100', + holidays=None, + busdaycal=None, out=None ) Calculates which of the given dates are valid days, and which are not. - .. versionadded:: 1.7.0 - Parameters ---------- dates : array_like of datetime64[D] @@ -1527,12 +1513,12 @@ def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, busdaycal=None, out=None): """ busday_offset( - dates, - offsets, - roll='raise', - weekmask='1111100', - holidays=None, - busdaycal=None, + dates, + offsets, + roll='raise', + weekmask='1111100', + holidays=None, + busdaycal=None, out=None ) @@ -1540,8 +1526,6 @@ def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, the ``roll`` rule, then applies offsets to the given dates counted in valid days. - .. 
versionadded:: 1.7.0 - Parameters ---------- dates : array_like of datetime64[D] @@ -1631,11 +1615,11 @@ def busday_count(begindates, enddates, weekmask=None, holidays=None, busdaycal=None, out=None): """ busday_count( - begindates, - enddates, - weekmask='1111100', - holidays=[], - busdaycal=None, + begindates, + enddates, + weekmask='1111100', + holidays=[], + busdaycal=None, out=None ) @@ -1645,8 +1629,6 @@ def busday_count(begindates, enddates, weekmask=None, holidays=None, If ``enddates`` specifies a date value that is earlier than the corresponding ``begindates`` date value, the count will be negative. - .. versionadded:: 1.7.0 - Parameters ---------- begindates : array_like of datetime64[D] @@ -1715,7 +1697,7 @@ def datetime_as_string(arr, unit=None, timezone=None, casting=None): arr : array_like of datetime64 The array of UTC timestamps to format. unit : str - One of None, 'auto', or + One of None, 'auto', or a :ref:`datetime unit `. timezone : {'naive', 'UTC', 'local'} or tzinfo Timezone information to use when displaying the datetime. If 'UTC', diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index dd1093015301..28cf5411645f 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -1,28 +1,39 @@ # TODO: Sort out any and all missing functions in this namespace -import builtins -import os import datetime as dt +from _typeshed import StrOrBytesPath, SupportsLenAndGetItem from collections.abc import Sequence, Callable, Iterable from typing import ( Literal as L, Any, + TypeAlias, overload, TypeVar, + TypedDict, SupportsIndex, final, Final, Protocol, ClassVar, + type_check_only, ) +from typing_extensions import CapsuleType, Unpack import numpy as np -from numpy import ( +from numpy import ( # type: ignore[attr-defined] # Re-exports - busdaycalendar as busdaycalendar, - broadcast as broadcast, - dtype as dtype, - ndarray as ndarray, - nditer as nditer, + busdaycalendar, + broadcast, + correlate, + count_nonzero, + dtype, + einsum as c_einsum, + flatiter, + from_dlpack, + interp, + matmul, + ndarray, + nditer, + vecdot, # The rest ufunc, @@ -43,11 +54,12 @@ from numpy import ( _CastingKind, _ModeKind, _SupportsBuffer, - _IOProtocol, + _SupportsFileMethods, _CopyMode, _NDIterFlagsKind, - _NDIterOpFlagsKind, + _NDIterFlagsOp, ) +from numpy.lib._array_utils_impl import normalize_axis_index from numpy._typing import ( # Shapes @@ -56,6 +68,7 @@ from numpy._typing import ( # DTypes DTypeLike, _DTypeLike, + _SupportsDType, # Arrays NDArray, @@ -78,19 +91,128 @@ from numpy._typing import ( _FloatLike_co, _TD64Like_co, ) +from numpy._typing._ufunc import ( + _2PTuple, + _PyFunc_Nin1_Nout1, + _PyFunc_Nin2_Nout1, + _PyFunc_Nin3P_Nout1, + _PyFunc_Nin1P_Nout2P, +) + +__all__ = [ + "_ARRAY_API", + "ALLOW_THREADS", + "BUFSIZE", + "CLIP", + "DATETIMEUNITS", + "ITEM_HASOBJECT", + "ITEM_IS_POINTER", + "LIST_PICKLE", + "MAXDIMS", + "MAY_SHARE_BOUNDS", + "MAY_SHARE_EXACT", + "NEEDS_INIT", + "NEEDS_PYAPI", + "RAISE", + "USE_GETITEM", + "USE_SETITEM", + "WRAP", + "_flagdict", + "from_dlpack", + "_place", + "_reconstruct", + "_vec_string", + "_monotonicity", + "add_docstring", + "arange", + "array", + "asarray", + "asanyarray", + "ascontiguousarray", + "asfortranarray", + "bincount", + "broadcast", + "busday_count", + "busday_offset", + "busdaycalendar", + "can_cast", + "compare_chararrays", + "concatenate", + "copyto", + "correlate", + "correlate2", + "count_nonzero", + "c_einsum", + "datetime_as_string", + "datetime_data", + "dot", + "dragon4_positional", + 
"dragon4_scientific", + "dtype", + "empty", + "empty_like", + "error", + "flagsobj", + "flatiter", + "format_longfloat", + "frombuffer", + "fromfile", + "fromiter", + "fromstring", + "get_handler_name", + "get_handler_version", + "inner", + "interp", + "interp_complex", + "is_busday", + "lexsort", + "matmul", + "vecdot", + "may_share_memory", + "min_scalar_type", + "ndarray", + "nditer", + "nested_iters", + "normalize_axis_index", + "packbits", + "promote_types", + "putmask", + "ravel_multi_index", + "result_type", + "scalar", + "set_datetimeparse_function", + "set_typeDict", + "shares_memory", + "typeinfo", + "unpackbits", + "unravel_index", + "vdot", + "where", + "zeros", +] _T_co = TypeVar("_T_co", covariant=True) _T_contra = TypeVar("_T_contra", contravariant=True) _SCT = TypeVar("_SCT", bound=generic) +_DType = TypeVar("_DType", bound=np.dtype[Any]) _ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any]) _ArrayType_co = TypeVar( "_ArrayType_co", bound=ndarray[Any, Any], covariant=True, ) +_ReturnType = TypeVar("_ReturnType") +_IDType = TypeVar("_IDType") +_Nin = TypeVar("_Nin", bound=int) +_Nout = TypeVar("_Nout", bound=int) + +_SizeType = TypeVar("_SizeType", bound=int) +_ShapeType = TypeVar("_ShapeType", bound=tuple[int, ...]) +_1DArray: TypeAlias = ndarray[tuple[_SizeType], dtype[_SCT]] +_Array: TypeAlias = ndarray[_ShapeType, dtype[_SCT]] # Valid time units -_UnitKind = L[ +_UnitKind: TypeAlias = L[ "Y", "M", "D", @@ -104,7 +226,7 @@ _UnitKind = L[ "fs", "as", ] -_RollKind = L[ # `raise` is deliberately excluded +_RollKind: TypeAlias = L[ # `raise` is deliberately excluded "nat", "forward", "following", @@ -114,14 +236,147 @@ _RollKind = L[ # `raise` is deliberately excluded "modifiedpreceding", ] -class _SupportsLenAndGetItem(Protocol[_T_contra, _T_co]): - def __len__(self) -> int: ... - def __getitem__(self, key: _T_contra, /) -> _T_co: ... - +@type_check_only class _SupportsArray(Protocol[_ArrayType_co]): def __array__(self, /) -> _ArrayType_co: ... -__all__: list[str] +@type_check_only +class _KwargsEmpty(TypedDict, total=False): + device: None | L["cpu"] + like: None | _SupportsArrayFunc + +@type_check_only +class _ConstructorEmpty(Protocol): + # 1-D shape + @overload + def __call__( + self, /, + shape: _SizeType, + dtype: None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> _Array[tuple[_SizeType], float64]: ... + @overload + def __call__( + self, /, + shape: _SizeType, + dtype: _DType | _SupportsDType[_DType], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> ndarray[tuple[_SizeType], _DType]: ... + @overload + def __call__( + self, /, + shape: _SizeType, + dtype: type[_SCT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> _Array[tuple[_SizeType], _SCT]: ... + @overload + def __call__( + self, /, + shape: _SizeType, + dtype: DTypeLike, + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> _Array[tuple[_SizeType], Any]: ... + + # known shape + @overload + def __call__( + self, /, + shape: _ShapeType, + dtype: None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> _Array[_ShapeType, float64]: ... + @overload + def __call__( + self, /, + shape: _ShapeType, + dtype: _DType | _SupportsDType[_DType], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> ndarray[_ShapeType, _DType]: ... + @overload + def __call__( + self, /, + shape: _ShapeType, + dtype: type[_SCT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> _Array[_ShapeType, _SCT]: ... 
+ @overload + def __call__( + self, /, + shape: _ShapeType, + dtype: DTypeLike, + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> _Array[_ShapeType, Any]: ... + + # unknown shape + @overload + def __call__( + self, /, + shape: _ShapeLike, + dtype: None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> NDArray[float64]: ... + @overload + def __call__( + self, /, + shape: _ShapeLike, + dtype: _DType | _SupportsDType[_DType], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> ndarray[Any, _DType]: ... + @overload + def __call__( + self, /, + shape: _ShapeLike, + dtype: type[_SCT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> NDArray[_SCT]: ... + @overload + def __call__( + self, /, + shape: _ShapeLike, + dtype: DTypeLike, + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> NDArray[Any]: ... + +error: Final = Exception + +# from ._multiarray_umath +ITEM_HASOBJECT: Final[L[1]] +LIST_PICKLE: Final[L[2]] +ITEM_IS_POINTER: Final[L[4]] +NEEDS_INIT: Final[L[8]] +NEEDS_PYAPI: Final[L[16]] +USE_GETITEM: Final[L[32]] +USE_SETITEM: Final[L[64]] +DATETIMEUNITS: Final[CapsuleType] +_ARRAY_API: Final[CapsuleType] +_flagdict: Final[dict[str, int]] +_monotonicity: Final[Callable[..., object]] +_place: Final[Callable[..., object]] +_reconstruct: Final[Callable[..., object]] +_vec_string: Final[Callable[..., object]] +correlate2: Final[Callable[..., object]] +dragon4_positional: Final[Callable[..., object]] +dragon4_scientific: Final[Callable[..., object]] +interp_complex: Final[Callable[..., object]] +set_datetimeparse_function: Final[Callable[..., object]] +def get_handler_name(a: NDArray[Any] = ..., /) -> str | None: ... +def get_handler_version(a: NDArray[Any] = ..., /) -> int | None: ... +def format_longfloat(x: np.longdouble, precision: int) -> str: ... +def scalar(dtype: _DType, object: bytes | object = ...) -> ndarray[tuple[()], _DType]: ... +def set_typeDict(dict_: dict[str, np.dtype[Any]], /) -> None: ... +typeinfo: Final[dict[str, np.dtype[np.generic]]] ALLOW_THREADS: Final[int] # 0 or 1 (system-specific) BUFSIZE: L[8192] @@ -133,6 +388,9 @@ MAY_SHARE_BOUNDS: L[0] MAY_SHARE_EXACT: L[-1] tracemalloc_domain: L[389047] +zeros: Final[_ConstructorEmpty] +empty: Final[_ConstructorEmpty] + @overload def empty_like( prototype: _ArrayType, @@ -251,62 +509,6 @@ def array( like: None | _SupportsArrayFunc = ..., ) -> NDArray[Any]: ... -@overload -def zeros( - shape: _ShapeLike, - dtype: None = ..., - order: _OrderCF = ..., - *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[float64]: ... -@overload -def zeros( - shape: _ShapeLike, - dtype: _DTypeLike[_SCT], - order: _OrderCF = ..., - *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... -@overload -def zeros( - shape: _ShapeLike, - dtype: DTypeLike, - order: _OrderCF = ..., - *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... - -@overload -def empty( - shape: _ShapeLike, - dtype: None = ..., - order: _OrderCF = ..., - *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[float64]: ... -@overload -def empty( - shape: _ShapeLike, - dtype: _DTypeLike[_SCT], - order: _OrderCF = ..., - *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... 
-@overload -def empty( - shape: _ShapeLike, - dtype: DTypeLike, - order: _OrderCF = ..., - *, - device: None | L["cpu"] = ..., - like: None | _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... - @overload def unravel_index( # type: ignore[misc] indices: _IntLike_co, @@ -348,7 +550,7 @@ def concatenate( # type: ignore[misc] ) -> NDArray[_SCT]: ... @overload def concatenate( # type: ignore[misc] - arrays: _SupportsLenAndGetItem[int, ArrayLike], + arrays: SupportsLenAndGetItem[ArrayLike], /, axis: None | SupportsIndex = ..., out: None = ..., @@ -358,7 +560,7 @@ def concatenate( # type: ignore[misc] ) -> NDArray[Any]: ... @overload def concatenate( # type: ignore[misc] - arrays: _SupportsLenAndGetItem[int, ArrayLike], + arrays: SupportsLenAndGetItem[ArrayLike], /, axis: None | SupportsIndex = ..., out: None = ..., @@ -368,7 +570,7 @@ def concatenate( # type: ignore[misc] ) -> NDArray[_SCT]: ... @overload def concatenate( # type: ignore[misc] - arrays: _SupportsLenAndGetItem[int, ArrayLike], + arrays: SupportsLenAndGetItem[ArrayLike], /, axis: None | SupportsIndex = ..., out: None = ..., @@ -378,7 +580,7 @@ def concatenate( # type: ignore[misc] ) -> NDArray[Any]: ... @overload def concatenate( - arrays: _SupportsLenAndGetItem[int, ArrayLike], + arrays: SupportsLenAndGetItem[ArrayLike], /, axis: None | SupportsIndex = ..., out: _ArrayType = ..., @@ -678,17 +880,82 @@ def fromstring( like: None | _SupportsArrayFunc = ..., ) -> NDArray[Any]: ... +@overload +def frompyfunc( # type: ignore[overload-overlap] + func: Callable[[Any], _ReturnType], /, + nin: L[1], + nout: L[1], + *, + identity: None = ..., +) -> _PyFunc_Nin1_Nout1[_ReturnType, None]: ... +@overload +def frompyfunc( # type: ignore[overload-overlap] + func: Callable[[Any], _ReturnType], /, + nin: L[1], + nout: L[1], + *, + identity: _IDType, +) -> _PyFunc_Nin1_Nout1[_ReturnType, _IDType]: ... +@overload +def frompyfunc( # type: ignore[overload-overlap] + func: Callable[[Any, Any], _ReturnType], /, + nin: L[2], + nout: L[1], + *, + identity: None = ..., +) -> _PyFunc_Nin2_Nout1[_ReturnType, None]: ... +@overload +def frompyfunc( # type: ignore[overload-overlap] + func: Callable[[Any, Any], _ReturnType], /, + nin: L[2], + nout: L[1], + *, + identity: _IDType, +) -> _PyFunc_Nin2_Nout1[_ReturnType, _IDType]: ... +@overload +def frompyfunc( # type: ignore[overload-overlap] + func: Callable[..., _ReturnType], /, + nin: _Nin, + nout: L[1], + *, + identity: None = ..., +) -> _PyFunc_Nin3P_Nout1[_ReturnType, None, _Nin]: ... +@overload +def frompyfunc( # type: ignore[overload-overlap] + func: Callable[..., _ReturnType], /, + nin: _Nin, + nout: L[1], + *, + identity: _IDType, +) -> _PyFunc_Nin3P_Nout1[_ReturnType, _IDType, _Nin]: ... +@overload +def frompyfunc( + func: Callable[..., _2PTuple[_ReturnType]], /, + nin: _Nin, + nout: _Nout, + *, + identity: None = ..., +) -> _PyFunc_Nin1P_Nout2P[_ReturnType, None, _Nin, _Nout]: ... +@overload +def frompyfunc( + func: Callable[..., _2PTuple[_ReturnType]], /, + nin: _Nin, + nout: _Nout, + *, + identity: _IDType, +) -> _PyFunc_Nin1P_Nout2P[_ReturnType, _IDType, _Nin, _Nout]: ... +@overload def frompyfunc( func: Callable[..., Any], /, nin: SupportsIndex, nout: SupportsIndex, *, - identity: Any = ..., + identity: None | object = ..., ) -> ufunc: ... @overload def fromfile( - file: str | bytes | os.PathLike[Any] | _IOProtocol, + file: StrOrBytesPath | _SupportsFileMethods, dtype: None = ..., count: SupportsIndex = ..., sep: str = ..., @@ -698,7 +965,7 @@ def fromfile( ) -> NDArray[float64]: ... 
@overload def fromfile( - file: str | bytes | os.PathLike[Any] | _IOProtocol, + file: StrOrBytesPath | _SupportsFileMethods, dtype: _DTypeLike[_SCT], count: SupportsIndex = ..., sep: str = ..., @@ -708,7 +975,7 @@ def fromfile( ) -> NDArray[_SCT]: ... @overload def fromfile( - file: str | bytes | os.PathLike[Any] | _IOProtocol, + file: StrOrBytesPath | _SupportsFileMethods, dtype: DTypeLike, count: SupportsIndex = ..., sep: str = ..., @@ -769,7 +1036,7 @@ def arange( # type: ignore[misc] dtype: None = ..., device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> NDArray[signedinteger[Any]]: ... +) -> _1DArray[int, signedinteger[Any]]: ... @overload def arange( # type: ignore[misc] start: _IntLike_co, @@ -779,7 +1046,7 @@ def arange( # type: ignore[misc] *, device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> NDArray[signedinteger[Any]]: ... +) -> _1DArray[int, signedinteger[Any]]: ... @overload def arange( # type: ignore[misc] stop: _FloatLike_co, @@ -787,7 +1054,7 @@ def arange( # type: ignore[misc] dtype: None = ..., device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> NDArray[floating[Any]]: ... +) -> _1DArray[int, floating[Any]]: ... @overload def arange( # type: ignore[misc] start: _FloatLike_co, @@ -797,7 +1064,7 @@ def arange( # type: ignore[misc] *, device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> NDArray[floating[Any]]: ... +) -> _1DArray[int, floating[Any]]: ... @overload def arange( stop: _TD64Like_co, @@ -805,7 +1072,7 @@ def arange( dtype: None = ..., device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> NDArray[timedelta64]: ... +) -> _1DArray[int, timedelta64]: ... @overload def arange( start: _TD64Like_co, @@ -815,7 +1082,7 @@ def arange( *, device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> NDArray[timedelta64]: ... +) -> _1DArray[int, timedelta64]: ... @overload def arange( # both start and stop must always be specified for datetime64 start: datetime64, @@ -825,7 +1092,7 @@ def arange( # both start and stop must always be specified for datetime64 *, device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> NDArray[datetime64]: ... +) -> _1DArray[int, datetime64]: ... @overload def arange( stop: Any, @@ -833,7 +1100,7 @@ def arange( dtype: _DTypeLike[_SCT], device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... +) -> _1DArray[int, _SCT]: ... @overload def arange( start: Any, @@ -843,7 +1110,7 @@ def arange( *, device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... +) -> _1DArray[int, _SCT]: ... @overload def arange( stop: Any, /, @@ -851,7 +1118,7 @@ def arange( dtype: DTypeLike, device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... +) -> _1DArray[int, Any]: ... @overload def arange( start: Any, @@ -861,7 +1128,7 @@ def arange( *, device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... +) -> _1DArray[int, Any]: ... def datetime_data( dtype: str | _DTypeLike[datetime64] | _DTypeLike[timedelta64], /, @@ -1017,7 +1284,7 @@ def compare_chararrays( def add_docstring(obj: Callable[..., Any], docstring: str, /) -> None: ... 
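The `arange` overloads above now return a shape-typed 1-D array (`_1DArray`, i.e. ``ndarray[tuple[int], dtype[...]]``) instead of a bare ``NDArray``. An illustrative static-typing sketch (variable names hypothetical; the inferred types are checked by a static type checker, not at runtime):

import numpy as np

x = np.arange(5)
# A type checker now infers ndarray[tuple[int], dtype[signedinteger[Any]]]:
# the shape tuple has exactly one element, so x is statically known to be 1-D.
y = np.arange(0.0, 1.0, 0.1)   # same idea, with a floating dtype

assert x.ndim == 1 and y.ndim == 1   # runtime behavior is unchanged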
-_GetItemKeys = L[ +_GetItemKeys: TypeAlias = L[ "C", "CONTIGUOUS", "C_CONTIGUOUS", "F", "FORTRAN", "F_CONTIGUOUS", "W", "WRITEABLE", @@ -1030,7 +1297,7 @@ _GetItemKeys = L[ "FNC", "FORC", ] -_SetItemKeys = L[ +_SetItemKeys: TypeAlias = L[ "A", "ALIGNED", "W", "WRITEABLE", "X", "WRITEBACKIFCOPY", @@ -1073,7 +1340,7 @@ def nested_iters( op: ArrayLike | Sequence[ArrayLike], axes: Sequence[Sequence[SupportsIndex]], flags: None | Sequence[_NDIterFlagsKind] = ..., - op_flags: None | Sequence[Sequence[_NDIterOpFlagsKind]] = ..., + op_flags: None | Sequence[Sequence[_NDIterFlagsOp]] = ..., op_dtypes: DTypeLike | Sequence[DTypeLike] = ..., order: _OrderKACF = ..., casting: _CastingKind = ..., diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index 39b3de44fabe..d4ca10a635dd 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -17,18 +17,17 @@ empty, empty_like, flatiter, frombuffer, from_dlpack, fromfile, fromiter, fromstring, inner, lexsort, matmul, may_share_memory, min_scalar_type, ndarray, nditer, nested_iters, promote_types, putmask, result_type, - shares_memory, vdot, where, zeros, normalize_axis_index, - _get_promotion_state, _set_promotion_state, vecdot + shares_memory, vdot, where, zeros, normalize_axis_index, vecdot ) from . import overrides from . import umath from . import shape_base -from .overrides import set_array_function_like_doc, set_module +from .overrides import finalize_array_function_like, set_module from .umath import (multiply, invert, sin, PINF, NAN) from . import numerictypes from ..exceptions import AxisError -from ._ufunc_config import errstate, _no_nep50_warning +from ._ufunc_config import errstate bitwise_not = invert ufunc = type(sin) @@ -53,7 +52,7 @@ 'identity', 'allclose', 'putmask', 'flatnonzero', 'inf', 'nan', 'False_', 'True_', 'bitwise_not', 'full', 'full_like', 'matmul', 'vecdot', 'shares_memory', - 'may_share_memory', '_get_promotion_state', '_set_promotion_state'] + 'may_share_memory'] def _zeros_like_dispatcher( @@ -76,15 +75,11 @@ def zeros_like( the returned array. dtype : data-type, optional Overrides the data type of the result. - - .. versionadded:: 1.6.0 order : {'C', 'F', 'A', or 'K'}, optional Overrides the memory layout of the result. 'C' means C-order, 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `a` as closely as possible. - - .. versionadded:: 1.6.0 subok : bool, optional. If True, then the newly created array will use the sub-class type of `a`, otherwise it will be a base-class array. Defaults @@ -93,8 +88,6 @@ def zeros_like( Overrides the shape of the result. If order='K' and the number of dimensions is unchanged, will try to keep order, otherwise, order='C' is implied. - - .. versionadded:: 1.17.0 device : str, optional The device on which to place the created array. Default: None. For Array-API interoperability only, so must be ``"cpu"`` if passed. @@ -141,7 +134,7 @@ def zeros_like( return res -@set_array_function_like_doc +@finalize_array_function_like @set_module('numpy') def ones(shape, dtype=None, order='C', *, device=None, like=None): """ @@ -231,15 +224,11 @@ def ones_like( the returned array. dtype : data-type, optional Overrides the data type of the result. - - .. versionadded:: 1.6.0 order : {'C', 'F', 'A', or 'K'}, optional Overrides the memory layout of the result. 'C' means C-order, 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, 'C' otherwise. 'K' means match the layout of `a` as closely as possible. - - .. 
versionadded:: 1.6.0 subok : bool, optional. If True, then the newly created array will use the sub-class type of `a`, otherwise it will be a base-class array. Defaults @@ -248,8 +237,6 @@ def ones_like( Overrides the shape of the result. If order='K' and the number of dimensions is unchanged, will try to keep order, otherwise, order='C' is implied. - - .. versionadded:: 1.17.0 device : str, optional The device on which to place the created array. Default: None. For Array-API interoperability only, so must be ``"cpu"`` if passed. @@ -300,7 +287,7 @@ def _full_dispatcher( return(like,) -@set_array_function_like_doc +@finalize_array_function_like @set_module('numpy') def full(shape, fill_value, dtype=None, order='C', *, device=None, like=None): """ @@ -407,8 +394,6 @@ def full_like( Overrides the shape of the result. If order='K' and the number of dimensions is unchanged, will try to keep order, otherwise, order='C' is implied. - - .. versionadded:: 1.17.0 device : str, optional The device on which to place the created array. Default: None. For Array-API interoperability only, so must be ``"cpu"`` if passed. @@ -485,16 +470,11 @@ def count_nonzero(a, axis=None, *, keepdims=False): Axis or tuple of axes along which to count non-zeros. Default is None, meaning that non-zeros will be counted along a flattened version of ``a``. - - .. versionadded:: 1.12.0 - keepdims : bool, optional If this is set to True, the axes that are counted are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array. - .. versionadded:: 1.19.0 - Returns ------- count : int or array of int @@ -917,8 +897,6 @@ def outer(a, b, out=None): out : (M, N) ndarray, optional A location where the result is stored - .. versionadded:: 1.9.0 - Returns ------- out : (M, N) ndarray @@ -1024,28 +1002,48 @@ def tensordot(a, b, axes=2): Notes ----- Three common use cases are: - - * ``axes = 0`` : tensor product :math:`a\\otimes b` - * ``axes = 1`` : tensor dot product :math:`a\\cdot b` - * ``axes = 2`` : (default) tensor double contraction :math:`a:b` - - When `axes` is a positive integer ``N``, the operation starts with - axis ``-N`` of `a` and axis ``0`` of `b`, and it continues through - axis ``-1`` of `a` and axis ``N-1`` of `b` (inclusive). + * ``axes = 0`` : tensor product :math:`a\\otimes b` + * ``axes = 1`` : tensor dot product :math:`a\\cdot b` + * ``axes = 2`` : (default) tensor double contraction :math:`a:b` + + When `axes` is integer_like, the sequence of axes for evaluation + will be: from the -Nth axis to the -1th axis in `a`, + and from the 0th axis to the (N-1)th axis in `b`. + For example, ``axes = 2`` is equal to + ``axes = [[-2, -1], [0, 1]]``. + When N-1 is smaller than 0, or when -N is larger than -1, + the elements of `a` and `b` are defined as the `axes`. When there is more than one axis to sum over - and they are not the last (first) axes of `a` (`b`) - the argument `axes` should consist of two sequences of the same length, with the first axis to sum over given first in both sequences, the second axis second, and so forth. + The same calculation can also be expressed with ``numpy.einsum`` (see the equivalence sketch below). The shape of the result consists of the non-contracted axes of the first tensor, followed by the non-contracted axes of the second.
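Since the Notes now point at ``numpy.einsum``, here is a short equivalence check for the documented example (a sketch; the einsum subscripts mirror ``axes=([1, 0], [0, 1])``):

>>> import numpy as np
>>> a = np.arange(60.).reshape(3, 4, 5)
>>> b = np.arange(24.).reshape(4, 3, 2)
>>> c = np.tensordot(a, b, axes=([1, 0], [0, 1]))
>>> np.allclose(c, np.einsum('ijk,jil->kl', a, b))
True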
Examples - -------- - A "traditional" example: + -------- + An example on integer_like: + + >>> a_0 = np.array([[1, 2], [3, 4]]) + >>> b_0 = np.array([[5, 6], [7, 8]]) + >>> c_0 = np.tensordot(a_0, b_0, axes=0) + >>> c_0.shape + (2, 2, 2, 2) + >>> c_0 + array([[[[ 5, 6], + [ 7, 8]], + [[10, 12], + [14, 16]]], + [[[15, 18], + [21, 24]], + [[20, 24], + [28, 32]]]]) + + An example on array_like: - >>> import numpy as np >>> a = np.arange(60.).reshape(3,4,5) >>> b = np.arange(24.).reshape(4,3,2) >>> c = np.tensordot(a,b, axes=([1,0],[0,1])) @@ -1057,7 +1055,9 @@ def tensordot(a, b, axes=2): [4664., 5018.], [4796., 5162.], [4928., 5306.]]) - >>> # A slower but equivalent way of computing the same... + + A slower but equivalent way of computing the same... + >>> d = np.zeros((5,2)) >>> for i in range(5): ... for j in range(2): @@ -1217,8 +1217,6 @@ def roll(a, shift, axis=None): Notes ----- - .. versionadded:: 1.12.0 - Supports rolling over multiple dimensions simultaneously. Examples @@ -1272,7 +1270,7 @@ def roll(a, shift, axis=None): "'shift' and 'axis' should be scalars or 1D sequences") shifts = {ax: 0 for ax in range(a.ndim)} for sh, ax in broadcasted: - shifts[ax] += sh + shifts[ax] += int(sh) rolls = [((slice(None), slice(None)),)] * a.ndim for ax, offset in shifts.items(): @@ -1398,8 +1396,6 @@ def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False): Used internally by multi-axis-checking logic. - .. versionadded:: 1.13.0 - Parameters ---------- axis : int, iterable of int @@ -1456,8 +1452,6 @@ def moveaxis(a, source, destination): Other axes remain in their original order. - .. versionadded:: 1.11.0 - Parameters ---------- a : np.ndarray @@ -1577,8 +1571,6 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): Notes ----- - .. versionadded:: 1.9.0 - Supports full broadcasting of the inputs. Dimension-2 input arrays were deprecated in 2.0.0. If you do need this @@ -1768,8 +1760,6 @@ def indices(dimensions, dtype=int, sparse=False): Return a sparse representation of the grid instead of a dense representation. Default is False. - .. versionadded:: 1.17 - Returns ------- grid : one ndarray or tuple of ndarrays @@ -1854,7 +1844,7 @@ def indices(dimensions, dtype=int, sparse=False): return res -@set_array_function_like_doc +@finalize_array_function_like @set_module('numpy') def fromfunction(function, shape, *, dtype=float, like=None, **kwargs): """ @@ -2202,7 +2192,7 @@ def _maketup(descr, val): return tuple(res) -@set_array_function_like_doc +@finalize_array_function_like @set_module('numpy') def identity(n, dtype=None, *, like=None): """ @@ -2279,8 +2269,6 @@ def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): Whether to compare NaN's as equal. If True, NaN's in `a` will be considered equal to NaN's in `b` in the output array. - .. versionadded:: 1.10.0 - Returns ------- allclose : bool @@ -2386,8 +2374,6 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): Notes ----- - .. 
versionadded:: 1.7.0 - For finite values, isclose uses the following equation to test whether two floating point values are equivalent.:: @@ -2457,7 +2443,7 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): elif isinstance(y, int): y = float(y) - with errstate(invalid='ignore'), _no_nep50_warning(): + with errstate(invalid='ignore'): result = (less_equal(abs(x-y), atol + rtol * abs(y)) & isfinite(y) | (x == y)) @@ -2500,8 +2486,6 @@ def array_equal(a1, a2, equal_nan=False): complex, values will be considered equal if either the real or the imaginary component of a given value is ``nan``. - .. versionadded:: 1.19.0 - Returns ------- b : bool @@ -2554,17 +2538,17 @@ def array_equal(a1, a2, equal_nan=False): if a1.shape != a2.shape: return False if not equal_nan: - return builtins.bool((a1 == a2).all()) - cannot_have_nan = (_dtype_cannot_hold_nan(a1.dtype) - and _dtype_cannot_hold_nan(a2.dtype)) - if cannot_have_nan: - if a1 is a2: - return True - return builtins.bool((a1 == a2).all()) + return builtins.bool((asanyarray(a1 == a2)).all()) if a1 is a2: # nan will compare equal so an array will compare equal to itself. return True + + cannot_have_nan = (_dtype_cannot_hold_nan(a1.dtype) + and _dtype_cannot_hold_nan(a2.dtype)) + if cannot_have_nan: + return builtins.bool(asarray(a1 == a2).all()) + # Handling NaN values if equal_nan is True a1nan, a2nan = isnan(a1), isnan(a2) # NaN's occur at different locations @@ -2624,7 +2608,7 @@ def array_equiv(a1, a2): except Exception: return False - return builtins.bool((a1 == a2).all()) + return builtins.bool(asanyarray(a1 == a2).all()) def _astype_dispatcher(x, dtype, /, *, copy=None, device=None): @@ -2705,7 +2689,7 @@ def astype(x, dtype, /, *, copy=True, device=None): def extend_all(module): existing = set(__all__) - mall = getattr(module, '__all__') + mall = module.__all__ for a in mall: if a not in existing: __all__.append(a) diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index f25c6258f2d0..d23300752cd7 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -1,6 +1,8 @@ from collections.abc import Callable, Sequence from typing import ( Any, + Final, + TypeAlias, overload, TypeVar, Literal as L, @@ -9,9 +11,29 @@ from typing import ( NoReturn, TypeGuard, ) +from typing_extensions import Unpack import numpy as np from numpy import ( + # re-exports + bitwise_not, + False_, + True_, + broadcast, + dtype, + flatiter, + from_dlpack, + inf, + little_endian, + matmul, + vecdot, + nan, + ndarray, + nditer, + newaxis, + ufunc, + + # other generic, unsignedinteger, signedinteger, @@ -25,11 +47,48 @@ from numpy import ( _OrderKACF, _OrderCF, ) +from .multiarray import ( + # re-exports + arange, + array, + asarray, + asanyarray, + ascontiguousarray, + asfortranarray, + can_cast, + concatenate, + copyto, + dot, + empty, + empty_like, + frombuffer, + fromfile, + fromiter, + fromstring, + inner, + lexsort, + may_share_memory, + min_scalar_type, + nested_iters, + putmask, + promote_types, + result_type, + shares_memory, + vdot, + where, + zeros, + + # other + _Array, + _ConstructorEmpty, + _KwargsEmpty, +) from numpy._typing import ( ArrayLike, NDArray, DTypeLike, + _SupportsDType, _ShapeLike, _DTypeLike, _ArrayLike, @@ -45,13 +104,89 @@ from numpy._typing import ( _ArrayLikeUnknown, ) +__all__ = [ + "newaxis", + "ndarray", + "flatiter", + "nditer", + "nested_iters", + "ufunc", + "arange", + "array", + "asarray", + "asanyarray", + "ascontiguousarray", + "asfortranarray", + "zeros", + "count_nonzero", + "empty", + 
"broadcast", + "dtype", + "fromstring", + "fromfile", + "frombuffer", + "from_dlpack", + "where", + "argwhere", + "copyto", + "concatenate", + "lexsort", + "astype", + "can_cast", + "promote_types", + "min_scalar_type", + "result_type", + "isfortran", + "empty_like", + "zeros_like", + "ones_like", + "correlate", + "convolve", + "inner", + "dot", + "outer", + "vdot", + "roll", + "rollaxis", + "moveaxis", + "cross", + "tensordot", + "little_endian", + "fromiter", + "array_equal", + "array_equiv", + "indices", + "fromfunction", + "isclose", + "isscalar", + "binary_repr", + "base_repr", + "ones", + "identity", + "allclose", + "putmask", + "flatnonzero", + "inf", + "nan", + "False_", + "True_", + "bitwise_not", + "full", + "full_like", + "matmul", + "vecdot", + "shares_memory", + "may_share_memory", +] + _T = TypeVar("_T") _SCT = TypeVar("_SCT", bound=generic) -_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) - -_CorrelateMode = L["valid", "same", "full"] +_DType = TypeVar("_DType", bound=np.dtype[Any]) +_ArrayType = TypeVar("_ArrayType", bound=np.ndarray[Any, Any]) +_SizeType = TypeVar("_SizeType", bound=int) +_ShapeType = TypeVar("_ShapeType", bound=tuple[int, ...]) -__all__: list[str] +_CorrelateMode: TypeAlias = L["valid", "same", "full"] @overload def zeros_like( @@ -104,33 +239,7 @@ def zeros_like( device: None | L["cpu"] = ..., ) -> NDArray[Any]: ... -@overload -def ones( - shape: _ShapeLike, - dtype: None = ..., - order: _OrderCF = ..., - *, - device: None | L["cpu"] = ..., - like: _SupportsArrayFunc = ..., -) -> NDArray[float64]: ... -@overload -def ones( - shape: _ShapeLike, - dtype: _DTypeLike[_SCT], - order: _OrderCF = ..., - *, - device: None | L["cpu"] = ..., - like: _SupportsArrayFunc = ..., -) -> NDArray[_SCT]: ... -@overload -def ones( - shape: _ShapeLike, - dtype: DTypeLike, - order: _OrderCF = ..., - *, - device: None | L["cpu"] = ..., - like: _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... +ones: Final[_ConstructorEmpty] @overload def ones_like( @@ -183,35 +292,105 @@ def ones_like( device: None | L["cpu"] = ..., ) -> NDArray[Any]: ... +# TODO: Add overloads for bool, int, float, complex, str, bytes, and memoryview +# 1-D shape @overload def full( - shape: _ShapeLike, + shape: _SizeType, + fill_value: _SCT, + dtype: None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> _Array[tuple[_SizeType], _SCT]: ... +@overload +def full( + shape: _SizeType, + fill_value: Any, + dtype: _DType | _SupportsDType[_DType], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> np.ndarray[tuple[_SizeType], _DType]: ... +@overload +def full( + shape: _SizeType, + fill_value: Any, + dtype: type[_SCT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> _Array[tuple[_SizeType], _SCT]: ... +@overload +def full( + shape: _SizeType, fill_value: Any, + dtype: None | DTypeLike = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> _Array[tuple[_SizeType], Any]: ... +# known shape +@overload +def full( + shape: _ShapeType, + fill_value: _SCT, dtype: None = ..., order: _OrderCF = ..., - *, - device: None | L["cpu"] = ..., - like: _SupportsArrayFunc = ..., -) -> NDArray[Any]: ... + **kwargs: Unpack[_KwargsEmpty], +) -> _Array[_ShapeType, _SCT]: ... +@overload +def full( + shape: _ShapeType, + fill_value: Any, + dtype: _DType | _SupportsDType[_DType], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> np.ndarray[_ShapeType, _DType]: ... 
+@overload +def full( + shape: _ShapeType, + fill_value: Any, + dtype: type[_SCT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> _Array[_ShapeType, _SCT]: ... +@overload +def full( + shape: _ShapeType, + fill_value: Any, + dtype: None | DTypeLike = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> _Array[_ShapeType, Any]: ... +# unknown shape +@overload +def full( + shape: _ShapeLike, + fill_value: _SCT, + dtype: None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> NDArray[_SCT]: ... @overload def full( shape: _ShapeLike, fill_value: Any, - dtype: _DTypeLike[_SCT], + dtype: _DType | _SupportsDType[_DType], order: _OrderCF = ..., - *, - device: None | L["cpu"] = ..., - like: _SupportsArrayFunc = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> np.ndarray[Any, _DType]: ... +@overload +def full( + shape: _ShapeLike, + fill_value: Any, + dtype: type[_SCT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], ) -> NDArray[_SCT]: ... @overload def full( shape: _ShapeLike, fill_value: Any, - dtype: DTypeLike, + dtype: None | DTypeLike = ..., order: _OrderCF = ..., - *, - device: None | L["cpu"] = ..., - like: _SupportsArrayFunc = ..., + **kwargs: Unpack[_KwargsEmpty], ) -> NDArray[Any]: ... @overload @@ -693,15 +872,15 @@ def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ... @overload def astype( - x: NDArray[Any], + x: ndarray[_ShapeType, dtype[Any]], dtype: _DTypeLike[_SCT], copy: bool = ..., device: None | L["cpu"] = ..., -) -> NDArray[_SCT]: ... +) -> ndarray[_ShapeType, dtype[_SCT]]: ... @overload def astype( - x: NDArray[Any], + x: ndarray[_ShapeType, dtype[Any]], dtype: DTypeLike, copy: bool = ..., device: None | L["cpu"] = ..., -) -> NDArray[Any]: ... +) -> ndarray[_ShapeType, dtype[Any]]: ... diff --git a/numpy/_core/numerictypes.py b/numpy/_core/numerictypes.py index d736aecd5a35..70bba5b9c515 100644 --- a/numpy/_core/numerictypes.py +++ b/numpy/_core/numerictypes.py @@ -81,21 +81,21 @@ from . import multiarray as ma from .multiarray import ( - ndarray, array, dtype, datetime_data, datetime_as_string, + ndarray, dtype, datetime_data, datetime_as_string, busday_offset, busday_count, is_busday, busdaycalendar ) from .._utils import set_module # we add more at the bottom __all__ = [ - 'ScalarType', 'typecodes', 'issubdtype', 'datetime_data', - 'datetime_as_string', 'busday_offset', 'busday_count', + 'ScalarType', 'typecodes', 'issubdtype', 'datetime_data', + 'datetime_as_string', 'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar', 'isdtype' ] # we don't need all these imports, but we need to keep them for compatibility # for users using np._core.numerictypes.UPPER_TABLE -from ._string_helpers import ( +from ._string_helpers import ( # noqa: F401 english_lower, english_upper, english_capitalize, LOWER_TABLE, UPPER_TABLE ) @@ -106,7 +106,7 @@ # we don't export these for import *, but we do want them accessible # as numerictypes.bool, etc. 
-from builtins import bool, int, float, complex, object, str, bytes +from builtins import bool, int, float, complex, object, str, bytes # noqa: F401, UP029 # We use this later @@ -229,7 +229,7 @@ def issctype(rep): return False except Exception: return False - + @set_module('numpy') def obj2sctype(rep, default=None): @@ -374,7 +374,7 @@ def _preprocess_dtype(dtype): if isinstance(dtype, ma.dtype): dtype = dtype.type if isinstance(dtype, ndarray) or dtype not in allTypes.values(): - raise _PreprocessDTypeError() + raise _PreprocessDTypeError return dtype @@ -452,7 +452,7 @@ def isdtype(dtype, kind): elif isinstance(kind, str): raise ValueError( "kind argument is a string, but" - f" {repr(kind)} is not a known kind name." + f" {kind!r} is not a known kind name." ) else: try: diff --git a/numpy/_core/numerictypes.pyi b/numpy/_core/numerictypes.pyi index b177dc55a6b6..ace5913f0f84 100644 --- a/numpy/_core/numerictypes.pyi +++ b/numpy/_core/numerictypes.pyi @@ -1,28 +1,48 @@ +import builtins from typing import ( - Literal as L, Any, - TypeVar, + Literal as L, TypedDict, + type_check_only, ) import numpy as np from numpy import ( dtype, generic, + bool, + bool_, + uint8, + uint16, + uint32, + uint64, ubyte, ushort, uintc, ulong, ulonglong, + uintp, + uint, + int8, + int16, + int32, + int64, byte, short, intc, long, longlong, + intp, + int_, + float16, + float32, + float64, half, single, double, longdouble, + complex64, + complex128, csingle, cdouble, clongdouble, @@ -32,17 +52,120 @@ from numpy import ( str_, bytes_, void, + unsignedinteger, + character, + inexact, + number, + integer, + flexible, + complexfloating, + signedinteger, + floating, ) - -from numpy._core._type_aliases import ( - sctypeDict as sctypeDict, +from ._type_aliases import sctypeDict # noqa: F401 +from .multiarray import ( + busday_count, + busday_offset, + busdaycalendar, + datetime_as_string, + datetime_data, + is_busday, ) from numpy._typing import DTypeLike +from numpy._typing._extended_precision import ( + uint128, + uint256, + int128, + int256, + float80, + float96, + float128, + float256, + complex160, + complex192, + complex256, + complex512, +) -_T = TypeVar("_T") -_SCT = TypeVar("_SCT", bound=generic) +__all__ = [ + "ScalarType", + "typecodes", + "issubdtype", + "datetime_data", + "datetime_as_string", + "busday_offset", + "busday_count", + "is_busday", + "busdaycalendar", + "isdtype", + "generic", + "unsignedinteger", + "character", + "inexact", + "number", + "integer", + "flexible", + "complexfloating", + "signedinteger", + "floating", + "bool", + "float16", + "float32", + "float64", + "longdouble", + "complex64", + "complex128", + "clongdouble", + "bytes_", + "str_", + "void", + "object_", + "datetime64", + "timedelta64", + "int8", + "byte", + "uint8", + "ubyte", + "int16", + "short", + "uint16", + "ushort", + "int32", + "intc", + "uint32", + "uintc", + "int64", + "long", + "uint64", + "ulong", + "longlong", + "ulonglong", + "intp", + "uintp", + "double", + "cdouble", + "single", + "csingle", + "half", + "bool_", + "int_", + "uint", + "uint128", + "uint256", + "int128", + "int256", + "float80", + "float96", + "float128", + "float256", + "complex160", + "complex192", + "complex256", + "complex512", +] +@type_check_only class _TypeCodes(TypedDict): Character: L['c'] Integer: L['bhilqnp'] @@ -54,21 +177,16 @@ class _TypeCodes(TypedDict): Datetime: L['Mm'] All: L['?bhilqnpBHILQNPefdgFDGSUVOMm'] -__all__: list[str] - -def isdtype( - dtype: dtype[Any] | type[Any], - kind: DTypeLike | tuple[DTypeLike, ...] 
-) -> bool: ... +def isdtype(dtype: dtype[Any] | type[Any], kind: DTypeLike | tuple[DTypeLike, ...]) -> builtins.bool: ... -def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> bool: ... +def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> builtins.bool: ... typecodes: _TypeCodes ScalarType: tuple[ type[int], type[float], type[complex], - type[bool], + type[builtins.bool], type[bytes], type[str], type[memoryview], diff --git a/numpy/_core/overrides.py b/numpy/_core/overrides.py index 6bb57c3dbf9a..cb466408cd39 100644 --- a/numpy/_core/overrides.py +++ b/numpy/_core/overrides.py @@ -1,7 +1,6 @@ """Implementation of __array_function__ overrides from NEP-18.""" import collections import functools -import os from .._utils import set_module from .._utils._inspect import getargspec @@ -20,12 +19,13 @@ compatible with that passed in via this argument.""" ) -def set_array_function_like_doc(public_api): - if public_api.__doc__ is not None: - public_api.__doc__ = public_api.__doc__.replace( - "${ARRAY_FUNCTION_LIKE}", - array_function_like_doc, - ) +def get_array_function_like_doc(public_api, docstring_template=""): + ARRAY_FUNCTIONS.add(public_api) + docstring = public_api.__doc__ or docstring_template + return docstring.replace("${ARRAY_FUNCTION_LIKE}", array_function_like_doc) + +def finalize_array_function_like(public_api): + public_api.__doc__ = get_array_function_like_doc(public_api) return public_api diff --git a/numpy/_core/records.py b/numpy/_core/records.py index 1f92500aed6e..90993badc141 100644 --- a/numpy/_core/records.py +++ b/numpy/_core/records.py @@ -127,7 +127,7 @@ def _parseFormats(self, formats, aligned=False): if isinstance(formats, list): dtype = sb.dtype( [ - ('f{}'.format(i), format_) + ('f{}'.format(i), format_) for i, format_ in enumerate(formats) ], aligned, @@ -403,7 +403,7 @@ def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None, ) else: self = ndarray.__new__( - subtype, shape, (record, descr), buffer=buf, + subtype, shape, (record, descr), buffer=buf, offset=offset, strides=strides, order=order ) return self @@ -453,8 +453,8 @@ def __setattr__(self, attr, val): # Automatically convert (void) structured types to records # (but not non-void structures, subarrays, or non-structured voids) if ( - attr == 'dtype' and - issubclass(val.type, nt.void) and + attr == 'dtype' and + issubclass(val.type, nt.void) and val.names is not None ): val = sb.dtype((record, val)) @@ -506,7 +506,7 @@ def __repr__(self): repr_dtype = self.dtype if ( - self.dtype.type is record or + self.dtype.type is record or not issubclass(self.dtype.type, nt.void) ): # If this is a full record array (has numpy.record dtype), diff --git a/numpy/_core/records.pyi b/numpy/_core/records.pyi index d88fb5c7221c..ef60803ffeb4 100644 --- a/numpy/_core/records.pyi +++ b/numpy/_core/records.pyi @@ -1,12 +1,15 @@ -import os +from _typeshed import StrOrBytesPath from collections.abc import Sequence, Iterable +from types import EllipsisType from typing import ( Any, + TypeAlias, TypeVar, overload, Protocol, SupportsIndex, - Literal + Literal, + type_check_only ) from numpy import ( @@ -16,8 +19,6 @@ from numpy import ( void, _ByteOrder, _SupportsBuffer, - _ShapeType_co, - _DType_co, _OrderKACF, ) @@ -25,16 +26,33 @@ from numpy._typing import ( ArrayLike, DTypeLike, NDArray, + _Shape, _ShapeLike, _ArrayLikeInt_co, _ArrayLikeVoid_co, _NestedSequence, ) +__all__ = [ + "record", + "recarray", + "format_parser", + "fromarrays", + "fromrecords", + "fromstring", + "fromfile", + "array", + "find_duplicate", 
+] + +_T = TypeVar("_T") _SCT = TypeVar("_SCT", bound=generic) +_DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True) +_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], covariant=True) -_RecArray = recarray[Any, dtype[_SCT]] +_RecArray: TypeAlias = recarray[Any, dtype[_SCT]] +@type_check_only class _SupportsReadInto(Protocol): def seek(self, offset: int, whence: int, /) -> object: ... def tell(self, /) -> int: ... @@ -49,7 +67,7 @@ class record(void): @overload def __getitem__(self, key: list[str]) -> record: ... -class recarray(ndarray[_ShapeType_co, _DType_co]): +class recarray(ndarray[_ShapeT_co, _DType_co]): # NOTE: While not strictly mandatory, we're demanding here that arguments # for the `format_parser`- and `dtype`-based dtype constructors are # mutually exclusive @@ -97,24 +115,24 @@ class recarray(ndarray[_ShapeType_co, _DType_co]): def __getitem__(self: recarray[Any, dtype[void]], indx: ( None | slice - | ellipsis + | EllipsisType | SupportsIndex | _ArrayLikeInt_co - | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...] - )) -> recarray[Any, _DType_co]: ... + | tuple[None | slice | EllipsisType | _ArrayLikeInt_co | SupportsIndex, ...] + )) -> recarray[_Shape, _DType_co]: ... @overload def __getitem__(self, indx: ( None | slice - | ellipsis + | EllipsisType | SupportsIndex | _ArrayLikeInt_co - | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...] - )) -> ndarray[Any, _DType_co]: ... + | tuple[None | slice | EllipsisType | _ArrayLikeInt_co | SupportsIndex, ...] + )) -> ndarray[_Shape, _DType_co]: ... @overload def __getitem__(self, indx: str) -> NDArray[Any]: ... @overload - def __getitem__(self, indx: list[str]) -> recarray[_ShapeType_co, dtype[record]]: ... + def __getitem__(self, indx: list[str]) -> recarray[_ShapeT_co, dtype[record]]: ... @overload def field(self, attr: int | str, val: None = ...) -> Any: ... @overload @@ -131,8 +149,6 @@ class format_parser: byteorder: None | _ByteOrder = ..., ) -> None: ... -__all__: list[str] - @overload def fromarrays( arrayList: Iterable[ArrayLike], @@ -209,7 +225,7 @@ def fromstring( @overload def fromfile( - fd: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _SupportsReadInto, + fd: StrOrBytesPath | _SupportsReadInto, dtype: DTypeLike, shape: None | _ShapeLike = ..., offset: int = ..., @@ -221,7 +237,7 @@ def fromfile( ) -> _RecArray[Any]: ... @overload def fromfile( - fd: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _SupportsReadInto, + fd: StrOrBytesPath | _SupportsReadInto, dtype: None = ..., shape: None | _ShapeLike = ..., offset: int = ..., @@ -327,3 +343,5 @@ def array( byteorder: None | _ByteOrder = ..., copy: bool = ..., ) -> _RecArray[record]: ... + +def find_duplicate(list: Iterable[_T]) -> list[_T]: ... diff --git a/numpy/_core/shape_base.py b/numpy/_core/shape_base.py index ebee4c061196..cc08ab460093 100644 --- a/numpy/_core/shape_base.py +++ b/numpy/_core/shape_base.py @@ -4,7 +4,6 @@ import functools import itertools import operator -import warnings from . import numeric as _nx from . import overrides @@ -236,7 +235,9 @@ def vstack(tup, *, dtype=None, casting="same_kind"): ---------- tup : sequence of ndarrays The arrays must have the same shape along all but the first axis. - 1-D arrays must have the same length. + 1-D arrays must have the same length. In the case of a single + array_like input, it will be treated as a sequence of arrays; i.e., + each element along the zeroth axis is treated as a separate array. 
dtype : str or dtype If provided, the destination array will have this dtype. Cannot be @@ -309,7 +310,9 @@ def hstack(tup, *, dtype=None, casting="same_kind"): ---------- tup : sequence of ndarrays The arrays must have the same shape along all but the second axis, - except 1-D arrays which can be any length. + except 1-D arrays which can be any length. In the case of a single + array_like input, it will be treated as a sequence of arrays; i.e., + each element along the zeroth axis is treated as a separate array. dtype : str or dtype If provided, the destination array will have this dtype. Cannot be @@ -383,12 +386,12 @@ def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"): dimensions of the result. For example, if ``axis=0`` it will be the first dimension and if ``axis=-1`` it will be the last dimension. - .. versionadded:: 1.10.0 - Parameters ---------- - arrays : sequence of array_like - Each array must have the same shape. + arrays : sequence of ndarrays + Each array must have the same shape. In the case of a single ndarray + array_like input, it will be treated as a sequence of arrays; i.e., + each element along the zeroth axis is treated as a separate array. axis : int, optional The axis in the result array along which the input arrays are stacked. @@ -796,8 +799,6 @@ def block(arrays): When the nested list is two levels deep, this allows block matrices to be constructed from their components. - .. versionadded:: 1.13.0 - Parameters ---------- arrays : nested list of array_like or scalars (but not tuples) @@ -838,7 +839,6 @@ def block(arrays): Notes ----- - When called with only scalars, ``np.block`` is equivalent to an ndarray call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to ``np.array([[1, 2], [3, 4]])``. diff --git a/numpy/_core/shape_base.pyi b/numpy/_core/shape_base.pyi index 627dbba06c19..0dadded9423a 100644 --- a/numpy/_core/shape_base.pyi +++ b/numpy/_core/shape_base.pyi @@ -10,11 +10,20 @@ from numpy._typing import ( _DTypeLike, ) +__all__ = [ + "atleast_1d", + "atleast_2d", + "atleast_3d", + "block", + "hstack", + "stack", + "unstack", + "vstack", +] + _SCT = TypeVar("_SCT", bound=generic) _ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) -__all__: list[str] - @overload def atleast_1d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... @overload diff --git a/numpy/_core/src/common/binop_override.h b/numpy/_core/src/common/binop_override.h index def9b895c872..a6b4747ca560 100644 --- a/numpy/_core/src/common/binop_override.h +++ b/numpy/_core/src/common/binop_override.h @@ -129,15 +129,15 @@ binop_should_defer(PyObject *self, PyObject *other, int inplace) * Classes with __array_ufunc__ are living in the future, and only need to * check whether __array_ufunc__ equals None. */ - attr = PyArray_LookupSpecial(other, npy_interned_str.array_ufunc); - if (attr != NULL) { + if (PyArray_LookupSpecial(other, npy_interned_str.array_ufunc, &attr) < 0) { + PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */ + } + else if (attr != NULL) { defer = !inplace && (attr == Py_None); Py_DECREF(attr); return defer; } - else if (PyErr_Occurred()) { - PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */ - } + /* * Otherwise, we need to check for the legacy __array_priority__. 
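The deferral rule described in the binop comment above is observable from Python: assigning None to __array_ufunc__ makes ndarray binary operators return NotImplemented, so the other operand's reflected method runs. A minimal sketch:

    import numpy as np

    class Deferred:
        __array_ufunc__ = None  # ndarray binary ops will defer to this class
        def __radd__(self, other):
            return "handled by Deferred"

    np.arange(3) + Deferred()   # returns 'handled by Deferred'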
But if * other.__class__ is a subtype of self.__class__, then it's already had diff --git a/numpy/_core/src/common/dlpack/dlpack.h b/numpy/_core/src/common/dlpack/dlpack.h index e05e600304d9..19ecc27761f8 100644 --- a/numpy/_core/src/common/dlpack/dlpack.h +++ b/numpy/_core/src/common/dlpack/dlpack.h @@ -109,7 +109,7 @@ typedef enum { */ kDLCUDAManaged = 13, /*! - * \brief Unified shared memory allocated on a oneAPI non-partititioned + * \brief Unified shared memory allocated on a oneAPI non-partitioned * device. Call to oneAPI runtime is required to determine the device * type, the USM allocation type and the sycl context it is bound to. * diff --git a/numpy/_core/src/common/get_attr_string.h b/numpy/_core/src/common/get_attr_string.h index 36d39189f9e7..324a92c5ef0c 100644 --- a/numpy/_core/src/common/get_attr_string.h +++ b/numpy/_core/src/common/get_attr_string.h @@ -2,7 +2,8 @@ #define NUMPY_CORE_SRC_COMMON_GET_ATTR_STRING_H_ #include -#include "ufunc_object.h" +#include "npy_pycompat.h" + static inline npy_bool _is_basic_python_type(PyTypeObject *tp) @@ -44,24 +45,21 @@ _is_basic_python_type(PyTypeObject *tp) * Assumes that the special method is a numpy-specific one, so does not look * at builtin types. It does check base ndarray and numpy scalar types. * - * In future, could be made more like _Py_LookupSpecial + * It may make sense to just replace this with `PyObject_GetOptionalAttr`. */ -static inline PyObject * -PyArray_LookupSpecial(PyObject *obj, PyObject *name_unicode) +static inline int +PyArray_LookupSpecial( + PyObject *obj, PyObject *name_unicode, PyObject **res) { PyTypeObject *tp = Py_TYPE(obj); /* We do not need to check for special attributes on trivial types */ if (_is_basic_python_type(tp)) { - return NULL; - } - PyObject *res = PyObject_GetAttr((PyObject *)tp, name_unicode); - - if (res == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_Clear(); + *res = NULL; + return 0; } - return res; + return PyObject_GetOptionalAttr((PyObject *)tp, name_unicode, res); } @@ -73,23 +71,20 @@ PyArray_LookupSpecial(PyObject *obj, PyObject *name_unicode) * * Kept for backwards compatibility. In future, we should deprecate this. */ -static inline PyObject * -PyArray_LookupSpecial_OnInstance(PyObject *obj, PyObject *name_unicode) +static inline int +PyArray_LookupSpecial_OnInstance( + PyObject *obj, PyObject *name_unicode, PyObject **res) { PyTypeObject *tp = Py_TYPE(obj); /* We do not need to check for special attributes on trivial types */ + /* Note: This check should likely be reduced on Python 3.13+ */ if (_is_basic_python_type(tp)) { - return NULL; - } - - PyObject *res = PyObject_GetAttr(obj, name_unicode); - - if (res == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_Clear(); + *res = NULL; + return 0; } - return res; + return PyObject_GetOptionalAttr(obj, name_unicode, res); } #endif /* NUMPY_CORE_SRC_COMMON_GET_ATTR_STRING_H_ */ diff --git a/numpy/_core/src/common/npy_argparse.c b/numpy/_core/src/common/npy_argparse.c index 70cb82bb4b2c..6766b17043ac 100644 --- a/numpy/_core/src/common/npy_argparse.c +++ b/numpy/_core/src/common/npy_argparse.c @@ -280,11 +280,11 @@ raise_missing_argument(const char *funcname, * * See macro version for an example pattern of how to use this function. 
* - * @param funcname - * @param cache + * @param funcname Function name + * @param cache a NULL-initialized persistent storage for data * @param args Python passed args (METH_FASTCALL) - * @param len_args - * @param kwnames + * @param len_args Number of arguments (not flagged) + * @param kwnames Tuple as passed by METH_FASTCALL or NULL. * @param ... List of arguments (see macro version). * * @return Returns 0 on success and -1 on failure. diff --git a/numpy/_core/src/common/npy_argparse.h b/numpy/_core/src/common/npy_argparse.h index 9f69da1307b5..e1eef918cb33 100644 --- a/numpy/_core/src/common/npy_argparse.h +++ b/numpy/_core/src/common/npy_argparse.h @@ -69,7 +69,7 @@ NPY_NO_EXPORT int init_argparse_mutex(void); * used in conjunction with the macro defined in the same scope. * (No two `npy_parse_arguments` may share a single `NPY_PREPARE_ARGPARSER`.) * - * @param funcname + * @param funcname Function name * @param args Python passed args (METH_FASTCALL) * @param len_args Number of arguments (not flagged) * @param kwnames Tuple as passed by METH_FASTCALL or NULL. diff --git a/numpy/_core/src/common/npy_atomic.h b/numpy/_core/src/common/npy_atomic.h index b92d58d583c0..f5b41d7068be 100644 --- a/numpy/_core/src/common/npy_atomic.h +++ b/numpy/_core/src/common/npy_atomic.h @@ -9,10 +9,18 @@ #include "numpy/npy_common.h" -#if __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__) -// TODO: support C++ atomics as well if this header is ever needed in C++ +#ifdef __cplusplus + extern "C++" { + #include <atomic> + } + #define _NPY_USING_STD using namespace std + #define _Atomic(tp) atomic<tp> + #define STDC_ATOMICS +#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L \ + && !defined(__STDC_NO_ATOMICS__) #include <stdatomic.h> #include <stdint.h> + #define _NPY_USING_STD #define STDC_ATOMICS #elif _MSC_VER #include <intrin.h> @@ -34,6 +42,7 @@ static inline npy_uint8 npy_atomic_load_uint8(const npy_uint8 *obj) { #ifdef STDC_ATOMICS + _NPY_USING_STD; return (npy_uint8)atomic_load((const _Atomic(uint8_t)*)obj); #elif defined(MSC_ATOMICS) #if defined(_M_X64) || defined(_M_IX86) @@ -49,19 +58,20 @@ npy_atomic_load_uint8(const npy_uint8 *obj) { static inline void* npy_atomic_load_ptr(const void *obj) { #ifdef STDC_ATOMICS + _NPY_USING_STD; return atomic_load((const _Atomic(void *)*)obj); #elif defined(MSC_ATOMICS) #if SIZEOF_VOID_P == 8 #if defined(_M_X64) || defined(_M_IX86) - return *(volatile uint64_t *)obj; + return (void *)*(volatile uint64_t *)obj; #elif defined(_M_ARM64) - return (uint64_t)__ldar64((unsigned __int64 volatile *)obj); + return (void *)__ldar64((unsigned __int64 volatile *)obj); #endif #else #if defined(_M_X64) || defined(_M_IX86) - return *(volatile uint32_t *)obj; + return (void *)*(volatile uint32_t *)obj; #elif defined(_M_ARM64) - return (uint32_t)__ldar32((unsigned __int32 volatile *)obj); + return (void *)__ldar32((unsigned __int32 volatile *)obj); #endif #endif #elif defined(GCC_ATOMICS) @@ -72,6 +82,7 @@ npy_atomic_load_ptr(const void *obj) { static inline void npy_atomic_store_uint8(npy_uint8 *obj, npy_uint8 value) { #ifdef STDC_ATOMICS + _NPY_USING_STD; atomic_store((_Atomic(uint8_t)*)obj, value); #elif defined(MSC_ATOMICS) _InterlockedExchange8((volatile char *)obj, (char)value); @@ -84,6 +95,7 @@ static inline void npy_atomic_store_ptr(void *obj, void *value) { #ifdef STDC_ATOMICS + _NPY_USING_STD; atomic_store((_Atomic(void *)*)obj, value); #elif defined(MSC_ATOMICS) _InterlockedExchangePointer((void * volatile *)obj, (void *)value); diff --git a/numpy/_core/src/common/npy_cpu_features.c
b/numpy/_core/src/common/npy_cpu_features.c index 43f2c435a140..7c0a4c60294c 100644 --- a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -849,7 +849,7 @@ npy__cpu_init_features(void) { /* * just in case the compiler doesn't respect ANSI - * but for knowing platforms it still nessecery, because @npy__cpu_init_features + * but for known platforms it is still necessary, because @npy__cpu_init_features * may be called multiple times and we need to clear the disabled features by * ENV Var or maybe in the future we can support other methods like * global variables, go back to @npy__cpu_try_disable_env for more understanding diff --git a/numpy/_core/src/common/npy_hashtable.c b/numpy/_core/src/common/npy_hashtable.cpp similarity index 90% rename from numpy/_core/src/common/npy_hashtable.c rename to numpy/_core/src/common/npy_hashtable.cpp index 5c745ba388cd..a4244fae88cb 100644 --- a/numpy/_core/src/common/npy_hashtable.c +++ b/numpy/_core/src/common/npy_hashtable.cpp @@ -12,8 +12,12 @@ * case is likely desired. */ +#include <new> +#include <shared_mutex> + #include "templ_common.h" #include "npy_hashtable.h" +#include @@ -29,18 +33,6 @@ #define _NpyHASH_XXROTATE(x) ((x << 13) | (x >> 19)) /* Rotate left 13 bits */ #endif -#ifdef Py_GIL_DISABLED -#define LOCK_TABLE(tb) PyMutex_Lock(&tb->mutex) -#define UNLOCK_TABLE(tb) PyMutex_Unlock(&tb->mutex) -#define INITIALIZE_LOCK(tb) memset(&tb->mutex, 0, sizeof(PyMutex)) -#else -// the GIL serializes access to the table so no need -// for locking if it is enabled -#define LOCK_TABLE(tb) -#define UNLOCK_TABLE(tb) -#define INITIALIZE_LOCK(tb) -#endif - /* * This hashing function is basically the Python tuple hash with the type * identity hash inlined. The tuple hash itself is a reduced version of xxHash.
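For orientation, a rough Python model of that combining step, assuming the 64-bit variant (the constants below are the xxHash64 primes; the 32-bit macro above rotates by 13, the 64-bit one by 31). This is illustrative only, not the exact C implementation:

    MASK = (1 << 64) - 1
    P1 = 11400714785074694791
    P2 = 14029467366897019727
    P5 = 2870177450012600261

    def combine(hashes):
        # Reduced xxHash as used by the tuple hash: absorb each lane,
        # rotate, then multiply.
        acc = P5
        for h in hashes:
            acc = (acc + (h & MASK) * P2) & MASK
            acc = ((acc << 31) | (acc >> 33)) & MASK  # rotate left 31 bits
            acc = (acc * P1) & MASK
        return acc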
@@ -101,7 +93,7 @@ find_item(PyArrayIdentityHash const *tb, PyObject *const *key) NPY_NO_EXPORT PyArrayIdentityHash * PyArrayIdentityHash_New(int key_len) { - PyArrayIdentityHash *res = PyMem_Malloc(sizeof(PyArrayIdentityHash)); + PyArrayIdentityHash *res = (PyArrayIdentityHash *)PyMem_Malloc(sizeof(PyArrayIdentityHash)); if (res == NULL) { PyErr_NoMemory(); return NULL; @@ -112,14 +104,22 @@ PyArrayIdentityHash_New(int key_len) res->size = 4; /* Start with a size of 4 */ res->nelem = 0; - INITIALIZE_LOCK(res); - - res->buckets = PyMem_Calloc(4 * (key_len + 1), sizeof(PyObject *)); + res->buckets = (PyObject **)PyMem_Calloc(4 * (key_len + 1), sizeof(PyObject *)); if (res->buckets == NULL) { PyErr_NoMemory(); PyMem_Free(res); return NULL; } + +#ifdef Py_GIL_DISABLED + res->mutex = new(std::nothrow) std::shared_mutex(); + if (res->mutex == nullptr) { + PyErr_NoMemory(); + PyMem_Free(res->buckets); + PyMem_Free(res); + return NULL; + } +#endif return res; } @@ -128,6 +127,9 @@ NPY_NO_EXPORT void PyArrayIdentityHash_Dealloc(PyArrayIdentityHash *tb) { PyMem_Free(tb->buckets); +#ifdef Py_GIL_DISABLED + delete (std::shared_mutex *)tb->mutex; +#endif PyMem_Free(tb); } @@ -163,7 +165,7 @@ _resize_if_necessary(PyArrayIdentityHash *tb) if (npy_mul_sizes_with_overflow(&alloc_size, new_size, tb->key_len + 1)) { return -1; } - tb->buckets = PyMem_Calloc(alloc_size, sizeof(PyObject *)); + tb->buckets = (PyObject **)PyMem_Calloc(alloc_size, sizeof(PyObject *)); if (tb->buckets == NULL) { tb->buckets = old_table; PyErr_NoMemory(); @@ -206,17 +208,14 @@ NPY_NO_EXPORT int PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, PyObject *const *key, PyObject *value, int replace) { - LOCK_TABLE(tb); if (value != NULL && _resize_if_necessary(tb) < 0) { /* Shrink, only if a new value is added. */ - UNLOCK_TABLE(tb); return -1; } PyObject **tb_item = find_item(tb, key); if (value != NULL) { if (tb_item[0] != NULL && tb_item[0] != value && !replace) { - UNLOCK_TABLE(tb); PyErr_SetString(PyExc_RuntimeError, "Identity cache already includes an item with this key."); return -1; @@ -230,7 +229,6 @@ PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, memset(tb_item, 0, (tb->key_len + 1) * sizeof(PyObject *)); } - UNLOCK_TABLE(tb); return 0; } @@ -238,8 +236,6 @@ PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, NPY_NO_EXPORT PyObject * PyArrayIdentityHash_GetItem(PyArrayIdentityHash *tb, PyObject *const *key) { - LOCK_TABLE(tb); PyObject *res = find_item(tb, key)[0]; - UNLOCK_TABLE(tb); return res; } diff --git a/numpy/_core/src/common/npy_hashtable.h b/numpy/_core/src/common/npy_hashtable.h index 583f3d9861a6..cd061ba6fa11 100644 --- a/numpy/_core/src/common/npy_hashtable.h +++ b/numpy/_core/src/common/npy_hashtable.h @@ -7,6 +7,10 @@ #include "numpy/ndarraytypes.h" +#ifdef __cplusplus +extern "C" { +#endif + typedef struct { int key_len; /* number of identities used */ /* Buckets stores: val1, key1[0], key1[1], ..., val2, key2[0], ...
*/ @@ -14,11 +18,7 @@ typedef struct { npy_intp size; /* current size */ npy_intp nelem; /* number of elements */ #ifdef Py_GIL_DISABLED -#if PY_VERSION_HEX < 0x30d00b3 -#error "GIL-disabled builds require Python 3.13.0b3 or newer" -#else - PyMutex mutex; -#endif + void *mutex; #endif } PyArrayIdentityHash; @@ -36,4 +36,8 @@ PyArrayIdentityHash_New(int key_len); NPY_NO_EXPORT void PyArrayIdentityHash_Dealloc(PyArrayIdentityHash *tb); +#ifdef __cplusplus +} +#endif + #endif /* NUMPY_CORE_SRC_COMMON_NPY_NPY_HASHTABLE_H_ */ diff --git a/numpy/_core/src/common/npy_import.h b/numpy/_core/src/common/npy_import.h index 9df85357b5ec..970efa8f549e 100644 --- a/numpy/_core/src/common/npy_import.h +++ b/numpy/_core/src/common/npy_import.h @@ -81,7 +81,7 @@ npy_import(const char *module, const char *attr) * * @param module Absolute module name. * @param attr module attribute to cache. - * @param cache Storage location for imported function. + * @param obj Storage location for imported function. */ static inline int npy_cache_import_runtime(const char *module, const char *attr, PyObject **obj) { diff --git a/numpy/_core/src/common/numpyos.c b/numpy/_core/src/common/numpyos.c index 319f5dcc395f..a5ca28081d52 100644 --- a/numpy/_core/src/common/numpyos.c +++ b/numpy/_core/src/common/numpyos.c @@ -282,7 +282,7 @@ fix_ascii_format(char* buf, size_t buflen, int decimal) * - format: The printf()-style format to use for the code to use for * converting. * - value: The value to convert - * - decimal: if != 0, always has a decimal, and at leasat one digit after + * - decimal: if != 0, always has a decimal, and at least one digit after * the decimal. This has the same effect as passing 'Z' in the original * PyOS_ascii_formatd * diff --git a/numpy/_core/src/common/numpyos.h b/numpy/_core/src/common/numpyos.h index fac82f7d438c..8fbecb122577 100644 --- a/numpy/_core/src/common/numpyos.h +++ b/numpy/_core/src/common/numpyos.h @@ -51,7 +51,7 @@ NPY_NO_EXPORT int NumPyOS_ascii_isupper(char c); NPY_NO_EXPORT int -NumPyOS_ascii_tolower(char c); +NumPyOS_ascii_tolower(int c); /* Convert a string to an int in an arbitrary base */ NPY_NO_EXPORT npy_longlong diff --git a/numpy/_core/src/common/pythoncapi-compat b/numpy/_core/src/common/pythoncapi-compat index 01341acbbef0..0f1d42a10a3f 160000 --- a/numpy/_core/src/common/pythoncapi-compat +++ b/numpy/_core/src/common/pythoncapi-compat @@ -1 +1 @@ -Subproject commit 01341acbbef0ca85cf2fa31b63307ddf4d9a87fb +Subproject commit 0f1d42a10a3f594ad48894912396df31b2c2d55d diff --git a/numpy/_core/src/common/simd/avx2/memory.h b/numpy/_core/src/common/simd/avx2/memory.h index f18636538174..8b30cb4cdf6c 100644 --- a/numpy/_core/src/common/simd/avx2/memory.h +++ b/numpy/_core/src/common/simd/avx2/memory.h @@ -705,7 +705,7 @@ NPYV_IMPL_AVX2_REST_PARTIAL_TYPES_PAIR(u64, s64) NPYV_IMPL_AVX2_REST_PARTIAL_TYPES_PAIR(f64, s64) /************************************************************ - * de-interlave load / interleave contiguous store + * de-interleave load / interleave contiguous store ************************************************************/ // two channels #define NPYV_IMPL_AVX2_MEM_INTERLEAVE(SFX, ZSFX) \ diff --git a/numpy/_core/src/common/simd/avx512/memory.h b/numpy/_core/src/common/simd/avx512/memory.h index e981ef8f6dd1..53e24477e6ac 100644 --- a/numpy/_core/src/common/simd/avx512/memory.h +++ b/numpy/_core/src/common/simd/avx512/memory.h @@ -651,7 +651,7 @@ NPYV_IMPL_AVX512_REST_PARTIAL_TYPES_PAIR(u64, s64) NPYV_IMPL_AVX512_REST_PARTIAL_TYPES_PAIR(f64, s64) 
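The memory transform these SIMD kernels perform is easiest to see in NumPy slicing terms; a sketch of the two-channel case, with strided slicing standing in for the vector shuffles:

    import numpy as np

    interleaved = np.arange(8)                     # x0, y0, x1, y1, ...
    x, y = interleaved[0::2], interleaved[1::2]    # de-interleave load
    out = np.empty_like(interleaved)
    out[0::2], out[1::2] = x, y                    # interleave contiguous store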
/************************************************************ - * de-interlave load / interleave contiguous store + * de-interleave load / interleave contiguous store ************************************************************/ // two channels #define NPYV_IMPL_AVX512_MEM_INTERLEAVE(SFX, ZSFX) \ diff --git a/numpy/_core/src/common/simd/neon/memory.h b/numpy/_core/src/common/simd/neon/memory.h index e7503b822e03..777cb87f5bab 100644 --- a/numpy/_core/src/common/simd/neon/memory.h +++ b/numpy/_core/src/common/simd/neon/memory.h @@ -584,7 +584,7 @@ NPYV_IMPL_NEON_REST_PARTIAL_TYPES_PAIR(f64, s64) #endif /************************************************************ - * de-interlave load / interleave contiguous store + * de-interleave load / interleave contiguous store ************************************************************/ // two channels #define NPYV_IMPL_NEON_MEM_INTERLEAVE(SFX, T_PTR) \ diff --git a/numpy/_core/src/common/simd/sse/memory.h b/numpy/_core/src/common/simd/sse/memory.h index 90c01ffefedb..0cd52a88fb89 100644 --- a/numpy/_core/src/common/simd/sse/memory.h +++ b/numpy/_core/src/common/simd/sse/memory.h @@ -683,7 +683,7 @@ NPYV_IMPL_SSE_REST_PARTIAL_TYPES_PAIR(u64, s64) NPYV_IMPL_SSE_REST_PARTIAL_TYPES_PAIR(f64, s64) /************************************************************ - * de-interlave load / interleave contiguous store + * de-interleave load / interleave contiguous store ************************************************************/ // two channels #define NPYV_IMPL_SSE_MEM_INTERLEAVE(SFX, ZSFX) \ diff --git a/numpy/_core/src/common/simd/vec/memory.h b/numpy/_core/src/common/simd/vec/memory.h index dbcdc16da395..3e8583bed1e0 100644 --- a/numpy/_core/src/common/simd/vec/memory.h +++ b/numpy/_core/src/common/simd/vec/memory.h @@ -623,7 +623,7 @@ NPYV_IMPL_VEC_REST_PARTIAL_TYPES_PAIR(u64, s64) NPYV_IMPL_VEC_REST_PARTIAL_TYPES_PAIR(f64, s64) /************************************************************ - * de-interlave load / interleave contiguous store + * de-interleave load / interleave contiguous store ************************************************************/ // two channels #define NPYV_IMPL_VEC_MEM_INTERLEAVE(SFX) \ diff --git a/numpy/_core/src/common/ufunc_override.c b/numpy/_core/src/common/ufunc_override.c index 17b678edd4bf..e98315f14a94 100644 --- a/numpy/_core/src/common/ufunc_override.c +++ b/numpy/_core/src/common/ufunc_override.c @@ -1,6 +1,7 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE +#include "numpy/ndarrayobject.h" #include "numpy/ndarraytypes.h" #include "npy_pycompat.h" #include "get_attr_string.h" @@ -35,14 +36,12 @@ PyUFuncOverride_GetNonDefaultArrayUfunc(PyObject *obj) * Does the class define __array_ufunc__? (Note that LookupSpecial has fast * return for basic python types, so no need to worry about those here) */ - cls_array_ufunc = PyArray_LookupSpecial(obj, npy_interned_str.array_ufunc); - if (cls_array_ufunc == NULL) { - if (PyErr_Occurred()) { - PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */ - } + if (PyArray_LookupSpecial( + obj, npy_interned_str.array_ufunc, &cls_array_ufunc) < 0) { + PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? 
*/ return NULL; } - /* Ignore if the same as ndarray.__array_ufunc__ */ + /* Ignore if the same as ndarray.__array_ufunc__ (it may be NULL here) */ if (cls_array_ufunc == npy_static_pydata.ndarray_array_ufunc) { Py_DECREF(cls_array_ufunc); return NULL; diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway index 5975f5ef76c3..0b696633f9ad 160000 --- a/numpy/_core/src/highway +++ b/numpy/_core/src/highway @@ -1 +1 @@ -Subproject commit 5975f5ef76c3e4364844d869454046f0f8420ef8 +Subproject commit 0b696633f9ad89497dd5532b55eaa01625ad71ca diff --git a/numpy/_core/src/multiarray/_datetime.h b/numpy/_core/src/multiarray/_datetime.h index c477d334e19d..dd25e1ffd6cc 100644 --- a/numpy/_core/src/multiarray/_datetime.h +++ b/numpy/_core/src/multiarray/_datetime.h @@ -328,4 +328,10 @@ find_object_datetime_type(PyObject *obj, int type_num); NPY_NO_EXPORT int PyArray_InitializeDatetimeCasts(void); +NPY_NO_EXPORT npy_hash_t +datetime_hash(PyArray_DatetimeMetaData *meta, npy_datetime dt); + +NPY_NO_EXPORT npy_hash_t +timedelta_hash(PyArray_DatetimeMetaData *meta, npy_timedelta td); + #endif /* NUMPY_CORE_SRC_MULTIARRAY__DATETIME_H_ */ diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index 5d0d91f1e996..fc73a64b19a0 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -1877,7 +1877,9 @@ get_fpu_mode(PyObject *NPY_UNUSED(self), PyObject *args) result = _controlfp(0, 0); return PyLong_FromLongLong(result); } -#elif (defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))) || (defined(_MSC_VER) && defined(__clang__)) +#elif (defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))) \ + || (defined(_MSC_VER) && defined(__clang__) && \ + (defined(_M_IX86) || defined(_M_AMD64))) { unsigned short cw = 0; __asm__("fstcw %w0" : "=m" (cw)); diff --git a/numpy/_core/src/multiarray/abstractdtypes.c b/numpy/_core/src/multiarray/abstractdtypes.c index 214833737792..1ef0ede62a11 100644 --- a/numpy/_core/src/multiarray/abstractdtypes.c +++ b/numpy/_core/src/multiarray/abstractdtypes.c @@ -177,7 +177,6 @@ int_common_dtype(PyArray_DTypeMeta *NPY_UNUSED(cls), PyArray_DTypeMeta *other) /* This is a back-compat fallback to usually do the right thing... */ PyArray_DTypeMeta *uint8_dt = &PyArray_UInt8DType; PyArray_DTypeMeta *res = NPY_DT_CALL_common_dtype(other, uint8_dt); - Py_DECREF(uint8_dt); if (res == NULL) { PyErr_Clear(); } @@ -406,7 +405,7 @@ npy_update_operand_for_scalar( else if (NPY_UNLIKELY(casting == NPY_EQUIV_CASTING) && descr->type_num != NPY_OBJECT) { /* - * increadibly niche, but users could pass equiv casting and we + * incredibly niche, but users could pass equiv casting and we * actually need to cast. Let object pass (technically correct) but * in all other cases, we don't technically consider equivalent. * NOTE(seberg): I don't think we should be beholden to this logic. @@ -477,7 +476,6 @@ npy_find_descr_for_scalar( /* If the DType doesn't know the scalar type, guess at default. 
*/ !NPY_DT_CALL_is_known_scalar_type(common, Py_TYPE(scalar))) { if (common->singleton != NULL) { - Py_INCREF(common->singleton); res = common->singleton; Py_INCREF(res); } diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.c index b7e7c9948ce1..33b8ecc7e0f8 100644 --- a/numpy/_core/src/multiarray/alloc.c +++ b/numpy/_core/src/multiarray/alloc.c @@ -80,6 +80,24 @@ _set_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *enabled_obj) } +NPY_FINLINE void +indicate_hugepages(void *p, size_t size) { +#ifdef NPY_OS_LINUX + /* allow kernel allocating huge pages for large arrays */ + if (NPY_UNLIKELY(size >= ((1u<<22u))) && + npy_thread_unsafe_state.madvise_hugepage) { + npy_uintp offset = 4096u - (npy_uintp)p % (4096u); + npy_uintp length = size - offset; + /** + * Intentionally not checking for errors that may be returned by + * older kernel versions; optimistically tries enabling huge pages. + */ + madvise((void*)((npy_uintp)p + offset), length, MADV_HUGEPAGE); + } +#endif +} + + /* as the cache is managed in global variables verify the GIL is held */ /* @@ -108,19 +126,7 @@ _npy_alloc_cache(npy_uintp nelem, npy_uintp esz, npy_uint msz, #ifdef _PyPyGC_AddMemoryPressure _PyPyPyGC_AddMemoryPressure(nelem * esz); #endif -#ifdef NPY_OS_LINUX - /* allow kernel allocating huge pages for large arrays */ - if (NPY_UNLIKELY(nelem * esz >= ((1u<<22u))) && - npy_thread_unsafe_state.madvise_hugepage) { - npy_uintp offset = 4096u - (npy_uintp)p % (4096u); - npy_uintp length = nelem * esz - offset; - /** - * Intentionally not checking for errors that may be returned by - * older kernel versions; optimistically tries enabling huge pages. - */ - madvise((void*)((npy_uintp)p + offset), length, MADV_HUGEPAGE); - } -#endif + indicate_hugepages(p, nelem * esz); } return p; } @@ -172,6 +178,9 @@ npy_alloc_cache_zero(size_t nmemb, size_t size) NPY_BEGIN_THREADS; p = PyDataMem_NEW_ZEROED(nmemb, size); NPY_END_THREADS; + if (p) { + indicate_hugepages(p, sz); + } return p; } @@ -238,7 +247,11 @@ PyDataMem_NEW(size_t size) assert(size != 0); result = malloc(size); - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + if (ret == -1) { + free(result); + return NULL; + } return result; } @@ -251,7 +264,11 @@ PyDataMem_NEW_ZEROED(size_t nmemb, size_t size) void *result; result = calloc(nmemb, size); - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); + if (ret == -1) { + free(result); + return NULL; + } return result; } @@ -274,11 +291,13 @@ PyDataMem_RENEW(void *ptr, size_t size) void *result; assert(size != 0); + PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); result = realloc(ptr, size); - if (result != ptr) { - PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + if (ret == -1) { + free(result); + return NULL; } - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); return result; } @@ -309,6 +328,9 @@ default_calloc(void *NPY_UNUSED(ctx), size_t nelem, size_t elsize) } NPY_BEGIN_THREADS; p = calloc(nelem, elsize); + if (p) { + indicate_hugepages(p, sz); + } NPY_END_THREADS; return p; } @@ -362,7 +384,11 @@ PyDataMem_UserNEW(size_t size, PyObject *mem_handler) } assert(size != 0); result = handler->allocator.malloc(handler->allocator.ctx, size); - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, 
(npy_uintp)result, size); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + if (ret == -1) { + handler->allocator.free(handler->allocator.ctx, result, size); + return NULL; + } return result; } @@ -376,7 +402,11 @@ PyDataMem_UserNEW_ZEROED(size_t nmemb, size_t size, PyObject *mem_handler) return NULL; } result = handler->allocator.calloc(handler->allocator.ctx, nmemb, size); - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); + if (ret == -1) { + handler->allocator.free(handler->allocator.ctx, result, size); + return NULL; + } return result; } @@ -406,11 +436,13 @@ PyDataMem_UserRENEW(void *ptr, size_t size, PyObject *mem_handler) } assert(size != 0); + PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); result = handler->allocator.realloc(handler->allocator.ctx, ptr, size); - if (result != ptr) { - PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + if (ret == -1) { + handler->allocator.free(handler->allocator.ctx, result, size); + return NULL; } - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); return result; } diff --git a/numpy/_core/src/multiarray/array_coercion.c b/numpy/_core/src/multiarray/array_coercion.c index 69da09875bfb..ff7d98bd9c64 100644 --- a/numpy/_core/src/multiarray/array_coercion.c +++ b/numpy/_core/src/multiarray/array_coercion.c @@ -6,6 +6,7 @@ #include #include "numpy/npy_3kcompat.h" +#include "npy_pycompat.h" #include "lowlevel_strided_loops.h" #include "numpy/arrayobject.h" @@ -224,36 +225,39 @@ npy_discover_dtype_from_pytype(PyTypeObject *pytype) PyObject *DType; if (pytype == &PyArray_Type) { - DType = Py_None; + DType = Py_NewRef(Py_None); } else if (pytype == &PyFloat_Type) { - DType = (PyObject *)&PyArray_PyFloatDType; + DType = Py_NewRef((PyObject *)&PyArray_PyFloatDType); } else if (pytype == &PyLong_Type) { - DType = (PyObject *)&PyArray_PyLongDType; + DType = Py_NewRef((PyObject *)&PyArray_PyLongDType); } else { - DType = PyDict_GetItem(_global_pytype_to_type_dict, - (PyObject *)pytype); + int res = PyDict_GetItemRef(_global_pytype_to_type_dict, + (PyObject *)pytype, (PyObject **)&DType); - if (DType == NULL) { - /* the python type is not known */ + if (res <= 0) { + /* the python type is not known or an error was set */ return NULL; } } - Py_INCREF(DType); assert(DType == Py_None || PyObject_TypeCheck(DType, (PyTypeObject *)&PyArrayDTypeMeta_Type)); return (PyArray_DTypeMeta *)DType; } /* - * Note: This function never fails, but will return `NULL` for unknown scalars - * and `None` for known array-likes (e.g. tuple, list, ndarray). + * Note: This function never fails, but will return `NULL` for unknown scalars or + * known array-likes (e.g. tuple, list, ndarray). */ NPY_NO_EXPORT PyObject * PyArray_DiscoverDTypeFromScalarType(PyTypeObject *pytype) { - return (PyObject *)npy_discover_dtype_from_pytype(pytype); + PyObject *DType = (PyObject *)npy_discover_dtype_from_pytype(pytype); + if (DType == NULL || DType == Py_None) { + return NULL; + } + return DType; } @@ -660,8 +664,8 @@ npy_new_coercion_cache( /** * Unlink coercion cache item. 
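The PyTraceMalloc_Track calls above are what make ndarray data buffers visible to Python's tracemalloc module; a quick way to observe the tracking (the ~8 MB figure assumes float64):

    import tracemalloc
    import numpy as np

    tracemalloc.start()
    a = np.ones((1000, 1000))              # ~8 MB data buffer
    current, _peak = tracemalloc.get_traced_memory()
    print(current >= a.nbytes)             # True: the buffer was reported
    tracemalloc.stop()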
* - * @param current - * @return next coercion cache object (or NULL) + * @param current This coercion cache object + * @return The next coercion cache object (or NULL) */ NPY_NO_EXPORT coercion_cache_obj * npy_unlink_coercion_cache(coercion_cache_obj *current) @@ -905,7 +909,7 @@ find_descriptor_from_array( * it supports inspecting the elements when the array has object dtype * (and the given datatype describes a parametric DType class). * - * @param arr + * @param arr The array object. * @param dtype NULL or a dtype class * @param descr A dtype instance, if the dtype is NULL the dtype class is * found and e.g. "S0" is converted to denote only String. diff --git a/numpy/_core/src/multiarray/array_method.c b/numpy/_core/src/multiarray/array_method.c index f09e560b0607..5554cad5e2dd 100644 --- a/numpy/_core/src/multiarray/array_method.c +++ b/numpy/_core/src/multiarray/array_method.c @@ -123,15 +123,16 @@ is_contiguous( * true, i.e., for cast safety "no-cast". It will not recognize view as an * option for other casts (e.g., viewing '>i8' as '>i4' with an offset of 4). * - * @param context - * @param aligned - * @param move_references UNUSED. - * @param strides - * @param descriptors - * @param out_loop - * @param out_transferdata - * @param flags - * @return 0 on success -1 on failure. + * @param context The arraymethod context + * @param aligned Flag indicating data is aligned (1) or not (0) + * param move_references UNUSED -- listed without the @ tag so doxygen does not pick it up as a parameter + * @param strides Array of step sizes for each dimension of the arrays involved + * @param out_loop Output pointer to the function that will perform the strided loop. + * @param out_transferdata Output pointer to auxiliary data (if any) + * needed by the out_loop function. + * @param flags Output pointer to additional flags (if any) + * needed by the out_loop function. + * @returns 0 on success, -1 on failure. */ NPY_NO_EXPORT int npy_default_get_strided_loop( @@ -169,7 +170,7 @@ npy_default_get_strided_loop( /** * Validate that the input is usable to create a new ArrayMethod. * - * @param spec + * @param spec Array method specification to be validated * @return 0 on success, -1 on error. */ static int diff --git a/numpy/_core/src/multiarray/arrayfunction_override.c b/numpy/_core/src/multiarray/arrayfunction_override.c index e4248ad29aba..9834ab138cf6 100644 --- a/numpy/_core/src/multiarray/arrayfunction_override.c +++ b/numpy/_core/src/multiarray/arrayfunction_override.c @@ -4,6 +4,7 @@ #include #include "structmember.h" +#include "numpy/ndarrayobject.h" #include "numpy/ndarraytypes.h" #include "get_attr_string.h" #include "npy_import.h" @@ -25,8 +26,9 @@ get_array_function(PyObject *obj) return npy_static_pydata.ndarray_array_function; } - PyObject *array_function = PyArray_LookupSpecial(obj, npy_interned_str.array_function); - if (array_function == NULL && PyErr_Occurred()) { + PyObject *array_function; + if (PyArray_LookupSpecial( + obj, npy_interned_str.array_function, &array_function) < 0) { PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */ } @@ -153,11 +155,22 @@ array_function_method_impl(PyObject *func, PyObject *types, PyObject *args, return Py_NotImplemented; } } - - PyObject *implementation = PyObject_GetAttr(func, npy_interned_str.implementation); - if (implementation == NULL) { + /* + * Python functions are wrapped, and we should now call their + * implementation, so that we do not dispatch a second time + * on possible subclasses.
+ * C functions that can be overridden with "like" are not wrapped and + * thus do not have an _implementation attribute, but since the like + * keyword has been removed, we can safely call those directly. + */ + PyObject *implementation; + if (PyObject_GetOptionalAttr( + func, npy_interned_str.implementation, &implementation) < 0) { return NULL; } + else if (implementation == NULL) { + return PyObject_Call(func, args, kwargs); + } PyObject *result = PyObject_Call(implementation, args, kwargs); Py_DECREF(implementation); return result; diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index 9524be8a0c89..931ced5d8176 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -275,41 +275,10 @@ static int #endif ) { PyArray_Descr *descr = PyArray_DescrFromType(NPY_@TYPE@); - int promotion_state = get_npy_promotion_state(); - if (promotion_state == NPY_USE_LEGACY_PROMOTION || ( - promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN - && !npy_give_promotion_warnings())) { - /* - * This path will be taken both for the "promotion" case such as - * `uint8_arr + 123` as well as the assignment case. - * The "legacy" path should only ever be taken for assignment - * (legacy promotion will prevent overflows by promoting up) - * so a normal deprecation makes sense. - * When weak promotion is active, we use "future" behavior unless - * warnings were explicitly opt-in. - */ - if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, - "NumPy will stop allowing conversion of out-of-bound " - "Python integers to integer arrays. The conversion " - "of %.100R to %S will fail in the future.\n" - "For the old behavior, usually:\n" - " np.array(value).astype(dtype)\n" - "will give the desired result (the cast overflows).", - obj, descr) < 0) { - Py_DECREF(descr); - return -1; - } - Py_DECREF(descr); - return 0; - } - else { - /* Live in the future, outright error: */ - PyErr_Format(PyExc_OverflowError, - "Python integer %R out of bounds for %S", obj, descr); - Py_DECREF(descr); - return -1; - } - assert(0); + PyErr_Format(PyExc_OverflowError, + "Python integer %R out of bounds for %S", obj, descr); + Py_DECREF(descr); + return -1; } return 0; } diff --git a/numpy/_core/src/multiarray/arraywrap.c b/numpy/_core/src/multiarray/arraywrap.c index ae7b6e987ff8..51c791cf9f83 100644 --- a/numpy/_core/src/multiarray/arraywrap.c +++ b/numpy/_core/src/multiarray/arraywrap.c @@ -57,11 +57,12 @@ npy_find_array_wrap( } } else { - PyObject *new_wrap = PyArray_LookupSpecial_OnInstance(obj, npy_interned_str.array_wrap); - if (new_wrap == NULL) { - if (PyErr_Occurred()) { - goto fail; - } + PyObject *new_wrap; + if (PyArray_LookupSpecial_OnInstance( + obj, npy_interned_str.array_wrap, &new_wrap) < 0) { + goto fail; + } + else if (new_wrap == NULL) { continue; } double curr_priority = PyArray_GetPriority(obj, 0); @@ -159,15 +160,14 @@ npy_apply_wrap( } else { /* Replace passed wrap/wrap_type (borrowed refs) with new_wrap/type. 
*/ - new_wrap = PyArray_LookupSpecial_OnInstance( - original_out, npy_interned_str.array_wrap); - if (new_wrap != NULL) { + if (PyArray_LookupSpecial_OnInstance( + original_out, npy_interned_str.array_wrap, &new_wrap) < 0) { + return NULL; + } + else if (new_wrap != NULL) { wrap = new_wrap; wrap_type = (PyObject *)Py_TYPE(original_out); } - else if (PyErr_Occurred()) { - return NULL; - } } } /* diff --git a/numpy/_core/src/multiarray/buffer.c b/numpy/_core/src/multiarray/buffer.c index f83e7b918e4e..fcff3ad6ca74 100644 --- a/numpy/_core/src/multiarray/buffer.c +++ b/numpy/_core/src/multiarray/buffer.c @@ -26,7 +26,7 @@ /************************************************************************* * PEP 3118 buffer protocol * - * Implementing PEP 3118 is somewhat convoluted because of the desirata: + * Implementing PEP 3118 is somewhat convoluted because of the requirements: * * - Don't add new members to ndarray or descr structs, to preserve binary * compatibility. (Also, adding the items is actually not very useful, diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c index 655122ff7f09..8236ec5c65ae 100644 --- a/numpy/_core/src/multiarray/common.c +++ b/numpy/_core/src/multiarray/common.c @@ -62,7 +62,7 @@ NPY_NO_EXPORT PyArray_Descr * PyArray_DTypeFromObjectStringDiscovery( PyObject *obj, PyArray_Descr *last_dtype, int string_type) { - int itemsize; + npy_intp itemsize; if (string_type == NPY_STRING) { PyObject *temp = PyObject_Str(obj); @@ -75,6 +75,12 @@ PyArray_DTypeFromObjectStringDiscovery( if (itemsize < 0) { return NULL; } + if (itemsize > NPY_MAX_INT) { + /* We can allow this, but should audit code paths before we do. */ + PyErr_Format(PyExc_TypeError, + "string of length %zd is too large to store inside array.", itemsize); + return NULL; + } } else if (string_type == NPY_UNICODE) { PyObject *temp = PyObject_Str(obj); @@ -86,6 +92,11 @@ PyArray_DTypeFromObjectStringDiscovery( if (itemsize < 0) { return NULL; } + if (itemsize > NPY_MAX_INT / 4) { + PyErr_Format(PyExc_TypeError, + "string of length %zd is too large to store inside array.", itemsize); + return NULL; + } itemsize *= 4; /* convert UCS4 codepoints to bytes */ } else { @@ -188,9 +199,9 @@ _IsWriteable(PyArrayObject *ap) /** * Convert an array shape to a string such as "(1, 2)". * - * @param Dimensionality of the shape - * @param npy_intp pointer to shape array - * @param String to append after the shape `(1, 2)%s`. + * @param n Dimensionality of the shape + * @param vals npy_intp pointer to shape array + * @param ending String to append after the shape `(1, 2)%s`. * * @return Python unicode string */ @@ -299,12 +310,11 @@ dot_alignment_error(PyArrayObject *a, int i, PyArrayObject *b, int j) /** * unpack tuple of PyDataType_FIELDS(dtype) (descr, offset, title[not-needed]) * - * @param "value" should be the tuple. - * - * @return "descr" will be set to the field's dtype - * @return "offset" will be set to the field's offset + * @param value should be the tuple. + * @param descr will be set to the field's dtype + * @param offset will be set to the field's offset * - * returns -1 on failure, 0 on success. + * @return -1 on failure, 0 on success. 
*/ NPY_NO_EXPORT int _unpack_field(PyObject *value, PyArray_Descr **descr, npy_intp *offset) diff --git a/numpy/_core/src/multiarray/common.h b/numpy/_core/src/multiarray/common.h index 19fba9e66d01..f4ba10d42e18 100644 --- a/numpy/_core/src/multiarray/common.h +++ b/numpy/_core/src/multiarray/common.h @@ -12,6 +12,10 @@ #include "npy_import.h" #include +#ifdef __cplusplus +extern "C" { +#endif + #define error_converting(x) (((x) == -1) && PyErr_Occurred()) #ifdef NPY_ALLOW_THREADS @@ -71,13 +75,6 @@ dot_alignment_error(PyArrayObject *a, int i, PyArrayObject *b, int j); /** * unpack tuple of PyDataType_FIELDS(dtype) (descr, offset, title[not-needed]) - * - * @param "value" should be the tuple. - * - * @return "descr" will be set to the field's dtype - * @return "offset" will be set to the field's offset - * - * returns -1 on failure, 0 on success. */ NPY_NO_EXPORT int _unpack_field(PyObject *value, PyArray_Descr **descr, npy_intp *offset); @@ -111,13 +108,13 @@ check_and_adjust_index(npy_intp *index, npy_intp max_item, int axis, /* Try to be as clear as possible about what went wrong. */ if (axis >= 0) { PyErr_Format(PyExc_IndexError, - "index %"NPY_INTP_FMT" is out of bounds " - "for axis %d with size %"NPY_INTP_FMT, + "index %" NPY_INTP_FMT" is out of bounds " + "for axis %d with size %" NPY_INTP_FMT, *index, axis, max_item); } else { PyErr_Format(PyExc_IndexError, - "index %"NPY_INTP_FMT" is out of bounds " - "for size %"NPY_INTP_FMT, *index, max_item); + "index %" NPY_INTP_FMT " is out of bounds " + "for size %" NPY_INTP_FMT, *index, max_item); } return -1; } @@ -170,7 +167,9 @@ check_and_adjust_axis(int *axis, int ndim) * . * clang versions < 8.0.0 have the same bug. */ -#if (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112 \ +#ifdef __cplusplus +#define NPY_ALIGNOF(type) alignof(type) +#elif (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112 \ || (defined __GNUC__ && __GNUC__ < 4 + (__GNUC_MINOR__ < 9) \ && !defined __clang__) \ || (defined __clang__ && __clang_major__ < 8)) @@ -354,4 +353,8 @@ new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out, */ #define NPY_ITER_REDUCTION_AXIS(axis) (axis + (1 << (NPY_BITSOF_INT - 2))) +#ifdef __cplusplus +} +#endif + #endif /* NUMPY_CORE_SRC_MULTIARRAY_COMMON_H_ */ diff --git a/numpy/_core/src/multiarray/common_dtype.c b/numpy/_core/src/multiarray/common_dtype.c index a65aba060a55..fabe595815d6 100644 --- a/numpy/_core/src/multiarray/common_dtype.c +++ b/numpy/_core/src/multiarray/common_dtype.c @@ -106,7 +106,7 @@ PyArray_CommonDType(PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2) * default_builtin_common_dtype * * @param length Number of DTypes - * @param dtypes + * @param dtypes List of DTypes to be reduced */ static PyArray_DTypeMeta * reduce_dtypes_to_most_knowledgeable( diff --git a/numpy/_core/src/multiarray/compiled_base.h b/numpy/_core/src/multiarray/compiled_base.h index e0e73ac798bf..b8081c8d3a55 100644 --- a/numpy/_core/src/multiarray/compiled_base.h +++ b/numpy/_core/src/multiarray/compiled_base.h @@ -10,9 +10,9 @@ arr_bincount(PyObject *, PyObject *const *, Py_ssize_t, PyObject *); NPY_NO_EXPORT PyObject * arr__monotonicity(PyObject *, PyObject *, PyObject *kwds); NPY_NO_EXPORT PyObject * -arr_interp(PyObject *, PyObject *const *, Py_ssize_t, PyObject *, PyObject *); +arr_interp(PyObject *, PyObject *const *, Py_ssize_t, PyObject *); NPY_NO_EXPORT PyObject * -arr_interp_complex(PyObject *, PyObject *const *, Py_ssize_t, PyObject *, PyObject *); +arr_interp_complex(PyObject *, PyObject 
*const *, Py_ssize_t, PyObject *); NPY_NO_EXPORT PyObject * arr_ravel_multi_index(PyObject *, PyObject *, PyObject *); NPY_NO_EXPORT PyObject * diff --git a/numpy/_core/src/multiarray/conversion_utils.c b/numpy/_core/src/multiarray/conversion_utils.c index e7b1936d1706..709bbe6557fc 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -1123,7 +1123,7 @@ PyArray_IntpFromPyIntConverter(PyObject *o, npy_intp *val) * @param seq A sequence created using `PySequence_Fast`. * @param vals Array used to store dimensions (must be large enough to * hold `maxvals` values). - * @param max_vals Maximum number of dimensions that can be written into `vals`. + * @param maxvals Maximum number of dimensions that can be written into `vals`. * @return Number of dimensions or -1 if an error occurred. * * .. note:: diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 550d3e253868..00251af5bf68 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -49,18 +49,6 @@ */ NPY_NO_EXPORT npy_intp REQUIRED_STR_LEN[] = {0, 3, 5, 10, 10, 20, 20, 20, 20}; -static NPY_TLS int npy_promotion_state = NPY_USE_LEGACY_PROMOTION; - -NPY_NO_EXPORT int -get_npy_promotion_state() { - return npy_promotion_state; -} - -NPY_NO_EXPORT void -set_npy_promotion_state(int new_promotion_state) { - npy_promotion_state = new_promotion_state; -} - static PyObject * PyArray_GetGenericToVoidCastingImpl(void); @@ -74,120 +62,24 @@ static PyObject * PyArray_GetObjectToGenericCastingImpl(void); -/* - * Return 1 if promotion warnings should be given and 0 if they are currently - * suppressed in the local context. - */ -NPY_NO_EXPORT int -npy_give_promotion_warnings(void) -{ - PyObject *val; - - if (npy_cache_import_runtime( - "numpy._core._ufunc_config", "NO_NEP50_WARNING", - &npy_runtime_imports.NO_NEP50_WARNING) == -1) { - PyErr_WriteUnraisable(NULL); - return 1; - } - - if (PyContextVar_Get(npy_runtime_imports.NO_NEP50_WARNING, - Py_False, &val) < 0) { - /* Errors should not really happen, but if it does assume we warn. 
*/ - PyErr_WriteUnraisable(NULL); - return 1; - } - Py_DECREF(val); - /* only when the no-warnings context is false, we give warnings */ - return val == Py_False; -} - - -NPY_NO_EXPORT PyObject * -npy__get_promotion_state(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(arg)) { - int promotion_state = get_npy_promotion_state(); - if (promotion_state == NPY_USE_WEAK_PROMOTION) { - return PyUnicode_FromString("weak"); - } - else if (promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN) { - return PyUnicode_FromString("weak_and_warn"); - } - else if (promotion_state == NPY_USE_LEGACY_PROMOTION) { - return PyUnicode_FromString("legacy"); - } - PyErr_SetString(PyExc_SystemError, "invalid promotion state!"); - return NULL; -} - - -NPY_NO_EXPORT PyObject * -npy__set_promotion_state(PyObject *NPY_UNUSED(mod), PyObject *arg) -{ - if (!PyUnicode_Check(arg)) { - PyErr_SetString(PyExc_TypeError, - "_set_promotion_state() argument or NPY_PROMOTION_STATE " - "must be a string."); - return NULL; - } - int new_promotion_state; - if (PyUnicode_CompareWithASCIIString(arg, "weak") == 0) { - new_promotion_state = NPY_USE_WEAK_PROMOTION; - } - else if (PyUnicode_CompareWithASCIIString(arg, "weak_and_warn") == 0) { - new_promotion_state = NPY_USE_WEAK_PROMOTION_AND_WARN; - } - else if (PyUnicode_CompareWithASCIIString(arg, "legacy") == 0) { - new_promotion_state = NPY_USE_LEGACY_PROMOTION; - } - else { - PyErr_Format(PyExc_TypeError, - "_set_promotion_state() argument or NPY_PROMOTION_STATE must be " - "'weak', 'legacy', or 'weak_and_warn' but got '%.100S'", arg); - return NULL; - } - set_npy_promotion_state(new_promotion_state); - Py_RETURN_NONE; -} - -/** - * Fetch the casting implementation from one DType to another. - * - * @params from - * @params to - * - * @returns A castingimpl (PyArrayDTypeMethod *), None or NULL with an - * error set. - */ -NPY_NO_EXPORT PyObject * -PyArray_GetCastingImpl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) +static PyObject * +create_casting_impl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) { - PyObject *res; - if (from == to) { - res = (PyObject *)NPY_DT_SLOTS(from)->within_dtype_castingimpl; - } - else { - res = PyDict_GetItemWithError(NPY_DT_SLOTS(from)->castingimpls, (PyObject *)to); - } - if (res != NULL || PyErr_Occurred()) { - Py_XINCREF(res); - return res; - } /* - * The following code looks up CastingImpl based on the fact that anything + * Look up CastingImpl based on the fact that anything * can be cast to and from objects or structured (void) dtypes. - * - * The last part adds casts dynamically based on legacy definition */ if (from->type_num == NPY_OBJECT) { - res = PyArray_GetObjectToGenericCastingImpl(); + return PyArray_GetObjectToGenericCastingImpl(); } else if (to->type_num == NPY_OBJECT) { - res = PyArray_GetGenericToObjectCastingImpl(); + return PyArray_GetGenericToObjectCastingImpl(); } else if (from->type_num == NPY_VOID) { - res = PyArray_GetVoidToGenericCastingImpl(); + return PyArray_GetVoidToGenericCastingImpl(); } else if (to->type_num == NPY_VOID) { - res = PyArray_GetGenericToVoidCastingImpl(); + return PyArray_GetGenericToVoidCastingImpl(); } /* * Reject non-legacy dtypes. 
They need to use the new API to add casts and @@ -211,50 +103,113 @@ PyArray_GetCastingImpl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) from->singleton, to->type_num); if (castfunc == NULL) { PyErr_Clear(); - /* Remember that this cast is not possible */ - if (PyDict_SetItem(NPY_DT_SLOTS(from)->castingimpls, - (PyObject *) to, Py_None) < 0) { - return NULL; - } Py_RETURN_NONE; } } - - /* PyArray_AddLegacyWrapping_CastingImpl find the correct casting level: */ - /* - * TODO: Possibly move this to the cast registration time. But if we do - * that, we have to also update the cast when the casting safety - * is registered. + /* Create a cast using the state of the legacy casting setup defined + * during the setup of the DType. + * + * Ideally we would do this when we create the DType, but legacy user + * DTypes don't have a way to signal that a DType is done setting up + * casts. Without such a mechanism, the safest way to know that a + * DType is done setting up is to register the cast lazily the first + * time a user does the cast. + * + * We *could* register the casts when we create the wrapping + * DTypeMeta, but that means the internals of the legacy user DType + * system would need to update the state of the casting safety flags + * in the cast implementations stored on the DTypeMeta. That's an + * inversion of abstractions and would be tricky to do without + * creating circular dependencies inside NumPy. */ if (PyArray_AddLegacyWrapping_CastingImpl(from, to, -1) < 0) { return NULL; } + /* castingimpls is unconditionally filled by + * AddLegacyWrapping_CastingImpl, so this won't create a recursive + * critical section + */ return PyArray_GetCastingImpl(from, to); } +} - if (res == NULL) { +static PyObject * +ensure_castingimpl_exists(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) +{ + int return_error = 0; + PyObject *res = NULL; + + /* Need to create the cast. This might happen at runtime so we enter a + critical section to avoid races */ + + Py_BEGIN_CRITICAL_SECTION(NPY_DT_SLOTS(from)->castingimpls); + + /* check if another thread filled it while this thread was blocked on + acquiring the critical section */ + if (PyDict_GetItemRef(NPY_DT_SLOTS(from)->castingimpls, (PyObject *)to, + &res) < 0) { + return_error = 1; + } + else if (res == NULL) { + res = create_casting_impl(from, to); + if (res == NULL) { + return_error = 1; + } + else if (PyDict_SetItem(NPY_DT_SLOTS(from)->castingimpls, + (PyObject *)to, res) < 0) { + return_error = 1; + } + } + Py_END_CRITICAL_SECTION(); + if (return_error) { + Py_XDECREF(res); return NULL; } - if (from == to) { + if (from == to && res == Py_None) { PyErr_Format(PyExc_RuntimeError, "Internal NumPy error, within-DType cast missing for %S!", from); Py_DECREF(res); return NULL; } - if (PyDict_SetItem(NPY_DT_SLOTS(from)->castingimpls, - (PyObject *)to, res) < 0) { - Py_DECREF(res); + return res; +} + +/** + * Fetch the casting implementation from one DType to another. + * + * @param from The implementation to cast from + * @param to The implementation to cast to + * + * @returns A castingimpl (PyArrayDTypeMethod *), None or NULL with an + * error set. 
+ */ +NPY_NO_EXPORT PyObject * +PyArray_GetCastingImpl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) +{ + PyObject *res = NULL; + if (from == to) { + if ((NPY_DT_SLOTS(from)->within_dtype_castingimpl) != NULL) { + res = Py_XNewRef( + (PyObject *)NPY_DT_SLOTS(from)->within_dtype_castingimpl); + } + } + else if (PyDict_GetItemRef(NPY_DT_SLOTS(from)->castingimpls, + (PyObject *)to, &res) < 0) { return NULL; } - return res; + if (res != NULL) { + return res; + } + + return ensure_castingimpl_exists(from, to); } /** * Fetch the (bound) casting implementation from one DType to another. * - * @params from - * @params to + * @params from source DType + * @params to destination DType * * @returns A bound casting implementation or None (or NULL for error). */ @@ -305,8 +260,8 @@ _get_castingimpl(PyObject *NPY_UNUSED(module), PyObject *args) * extending cast-levels if necessary. * It is not valid for one of the arguments to be -1 to indicate an error. * - * @param casting1 - * @param casting2 + * @param casting1 First (left-hand) casting level to compare + * @param casting2 Second (right-hand) casting level to compare * @return The minimal casting error (can be -1). */ NPY_NO_EXPORT NPY_CASTING @@ -495,11 +450,13 @@ _get_cast_safety_from_castingimpl(PyArrayMethodObject *castingimpl, * implementations fully to have them available for doing the actual cast * later. * - * @param from + * @param from The descriptor to cast from * @param to The descriptor to cast to (may be NULL) * @param to_dtype If `to` is NULL, must pass the to_dtype (otherwise this * is ignored). - * @param[out] view_offset + * @param view_offset If set, the cast can be described by a view with + * this byte offset. For example, casting "i8" to "i8," + * (the structured dtype) can be described with `*view_offset = 0`. * @return NPY_CASTING or -1 on error or if the cast is not possible. */ NPY_NO_EXPORT NPY_CASTING @@ -544,7 +501,7 @@ PyArray_GetCastInfo( * user would have to guess the string length.) * * @param casting the requested casting safety. - * @param from + * @param from The descriptor to cast from * @param to The descriptor to cast to (may be NULL) * @param to_dtype If `to` is NULL, must pass the to_dtype (otherwise this * is ignored). @@ -724,26 +681,6 @@ dtype_kind_to_ordering(char kind) } } -/* Converts a type number from unsigned to signed */ -static int -type_num_unsigned_to_signed(int type_num) -{ - switch (type_num) { - case NPY_UBYTE: - return NPY_BYTE; - case NPY_USHORT: - return NPY_SHORT; - case NPY_UINT: - return NPY_INT; - case NPY_ULONG: - return NPY_LONG; - case NPY_ULONGLONG: - return NPY_LONGLONG; - default: - return type_num; - } -} - /*NUMPY_API * Returns true if data of type 'from' may be cast to data of type @@ -789,83 +726,6 @@ static int min_scalar_type_num(char *valueptr, int type_num, int *is_small_unsigned); -/* - * NOTE: This function uses value based casting logic for scalars. It will - * require updates when we phase out value-based-casting. - */ -NPY_NO_EXPORT npy_bool -can_cast_scalar_to(PyArray_Descr *scal_type, char *scal_data, - PyArray_Descr *to, NPY_CASTING casting) -{ - /* - * If the two dtypes are actually references to the same object - * or if casting type is forced unsafe then always OK. - * - * TODO: Assuming that unsafe casting always works is not actually correct - */ - if (scal_type == to || casting == NPY_UNSAFE_CASTING ) { - return 1; - } - - int valid = PyArray_CheckCastSafety(casting, scal_type, to, NPY_DTYPE(to)); - if (valid == 1) { - /* This is definitely a valid cast. 
*/ - return 1; - } - if (valid < 0) { - /* Probably must return 0, but just keep trying for now. */ - PyErr_Clear(); - } - - /* - * If the scalar isn't a number, value-based casting cannot kick in and - * we must not attempt it. - * (Additional fast-checks would be possible, but probably unnecessary.) - */ - if (!PyTypeNum_ISNUMBER(scal_type->type_num)) { - return 0; - } - - /* - * At this point we have to check value-based casting. - */ - PyArray_Descr *dtype; - int is_small_unsigned = 0, type_num; - /* An aligned memory buffer large enough to hold any builtin numeric type */ - npy_longlong value[4]; - - int swap = !PyArray_ISNBO(scal_type->byteorder); - PyDataType_GetArrFuncs(scal_type)->copyswap(&value, scal_data, swap, NULL); - - type_num = min_scalar_type_num((char *)&value, scal_type->type_num, - &is_small_unsigned); - - /* - * If we've got a small unsigned scalar, and the 'to' type - * is not unsigned, then make it signed to allow the value - * to be cast more appropriately. - */ - if (is_small_unsigned && !(PyTypeNum_ISUNSIGNED(to->type_num))) { - type_num = type_num_unsigned_to_signed(type_num); - } - - dtype = PyArray_DescrFromType(type_num); - if (dtype == NULL) { - return 0; - } -#if 0 - printf("min scalar cast "); - PyObject_Print(dtype, stdout, 0); - printf(" to "); - PyObject_Print(to, stdout, 0); - printf("\n"); -#endif - npy_bool ret = PyArray_CanCastTypeTo(dtype, to, casting); - Py_DECREF(dtype); - return ret; -} - - NPY_NO_EXPORT npy_bool can_cast_pyscalar_scalar_to( int flags, PyArray_Descr *to, NPY_CASTING casting) @@ -897,18 +757,29 @@ can_cast_pyscalar_scalar_to( } /* - * For all other cases we use the default dtype. + * For all other cases we need to make a bit of a dance to find the cast + * safety. We do so by finding the descriptor for the "scalar" (without + * a value; for parametric user dtypes a value may be needed eventually). */ - PyArray_Descr *from; + PyArray_DTypeMeta *from_DType; + PyArray_Descr *default_dtype; if (flags & NPY_ARRAY_WAS_PYTHON_INT) { - from = PyArray_DescrFromType(NPY_LONG); + default_dtype = PyArray_DescrNewFromType(NPY_INTP); + from_DType = &PyArray_PyLongDType; } else if (flags & NPY_ARRAY_WAS_PYTHON_FLOAT) { - from = PyArray_DescrFromType(NPY_DOUBLE); + default_dtype = PyArray_DescrNewFromType(NPY_FLOAT64); + from_DType = &PyArray_PyFloatDType; } else { - from = PyArray_DescrFromType(NPY_CDOUBLE); + default_dtype = PyArray_DescrNewFromType(NPY_COMPLEX128); + from_DType = &PyArray_PyComplexDType; } + + PyArray_Descr *from = npy_find_descr_for_scalar( + NULL, default_dtype, from_DType, NPY_DTYPE(to)); + Py_DECREF(default_dtype); + int res = PyArray_CanCastTypeTo(from, to, casting); Py_DECREF(from); return res; @@ -932,25 +803,14 @@ PyArray_CanCastArrayTo(PyArrayObject *arr, PyArray_Descr *to, to = NULL; } - if (get_npy_promotion_state() == NPY_USE_LEGACY_PROMOTION) { - /* - * If it's a scalar, check the value. (This only currently matters for - * numeric types and for `to == NULL` it can't be numeric.) - */ - if (PyArray_NDIM(arr) == 0 && !PyArray_HASFIELDS(arr) && to != NULL) { - return can_cast_scalar_to(from, PyArray_DATA(arr), to, casting); - } - } - else { - /* - * If it's a scalar, check the value. (This only currently matters for - * numeric types and for `to == NULL` it can't be numeric.) - */ - if (PyArray_FLAGS(arr) & NPY_ARRAY_WAS_PYTHON_LITERAL && to != NULL) { - return can_cast_pyscalar_scalar_to( - PyArray_FLAGS(arr) & NPY_ARRAY_WAS_PYTHON_LITERAL, to, - casting); - } + /* + * If it's a scalar, check the value. 
(This only currently matters for + * numeric types and for `to == NULL` it can't be numeric.) + */ + if (PyArray_FLAGS(arr) & NPY_ARRAY_WAS_PYTHON_LITERAL && to != NULL) { + return can_cast_pyscalar_scalar_to( + PyArray_FLAGS(arr) & NPY_ARRAY_WAS_PYTHON_LITERAL, to, + casting); } /* Otherwise, use the standard rules (same as `PyArray_CanCastTypeTo`) */ @@ -987,11 +847,10 @@ npy_casting_to_string(NPY_CASTING casting) /** * Helper function to set a useful error when casting is not possible. * - * @param src_dtype - * @param dst_dtype - * @param casting - * @param scalar Whether this was a "scalar" cast (includes 0-D array with - * PyArray_CanCastArrayTo result). + * @param src_dtype The source descriptor to cast from + * @param dst_dtype The destination descriptor trying to cast to + * @param casting The casting rule that was violated + * @param scalar Boolean flag indicating if this was a "scalar" cast. */ NPY_NO_EXPORT void npy_set_invalid_cast_error( @@ -1030,58 +889,6 @@ PyArray_CanCastScalar(PyTypeObject *from, PyTypeObject *to) return (npy_bool) PyArray_CanCastSafely(fromtype, totype); } -/* - * Internal promote types function which handles unsigned integers which - * fit in same-sized signed integers specially. - */ -static PyArray_Descr * -promote_types(PyArray_Descr *type1, PyArray_Descr *type2, - int is_small_unsigned1, int is_small_unsigned2) -{ - if (is_small_unsigned1) { - int type_num1 = type1->type_num; - int type_num2 = type2->type_num; - int ret_type_num; - - if (type_num2 < NPY_NTYPES_LEGACY && !(PyTypeNum_ISBOOL(type_num2) || - PyTypeNum_ISUNSIGNED(type_num2))) { - /* Convert to the equivalent-sized signed integer */ - type_num1 = type_num_unsigned_to_signed(type_num1); - - ret_type_num = _npy_type_promotion_table[type_num1][type_num2]; - /* The table doesn't handle string/unicode/void, check the result */ - if (ret_type_num >= 0) { - return PyArray_DescrFromType(ret_type_num); - } - } - - return PyArray_PromoteTypes(type1, type2); - } - else if (is_small_unsigned2) { - int type_num1 = type1->type_num; - int type_num2 = type2->type_num; - int ret_type_num; - - if (type_num1 < NPY_NTYPES_LEGACY && !(PyTypeNum_ISBOOL(type_num1) || - PyTypeNum_ISUNSIGNED(type_num1))) { - /* Convert to the equivalent-sized signed integer */ - type_num2 = type_num_unsigned_to_signed(type_num2); - - ret_type_num = _npy_type_promotion_table[type_num1][type_num2]; - /* The table doesn't handle string/unicode/void, check the result */ - if (ret_type_num >= 0) { - return PyArray_DescrFromType(ret_type_num); - } - } - - return PyArray_PromoteTypes(type1, type2); - } - else { - return PyArray_PromoteTypes(type1, type2); - } - -} - /** * This function should possibly become public API eventually. At this @@ -1576,11 +1383,19 @@ static int min_scalar_type_num(char *valueptr, int type_num, } +/*NUMPY_API + * If arr is a scalar (has 0 dimensions) with a built-in number data type, + * finds the smallest type size/kind which can still represent its data. + * Otherwise, returns the array's data type. + * + * NOTE: This API is a left over from before NumPy 2 (and NEP 50) and should + * probably be eventually deprecated and removed. + */ NPY_NO_EXPORT PyArray_Descr * -PyArray_MinScalarType_internal(PyArrayObject *arr, int *is_small_unsigned) +PyArray_MinScalarType(PyArrayObject *arr) { + int is_small_unsigned; PyArray_Descr *dtype = PyArray_DESCR(arr); - *is_small_unsigned = 0; /* * If the array isn't a numeric scalar, just return the array's dtype. 
*/ @@ -1597,23 +1412,11 @@ PyArray_MinScalarType_internal(PyArrayObject *arr, int *is_small_unsigned) return PyArray_DescrFromType( min_scalar_type_num((char *)&value, - dtype->type_num, is_small_unsigned)); + dtype->type_num, &is_small_unsigned)); } } -/*NUMPY_API - * If arr is a scalar (has 0 dimensions) with a built-in number data type, - * finds the smallest type size/kind which can still represent its data. - * Otherwise, returns the array's data type. - * - */ -NPY_NO_EXPORT PyArray_Descr * -PyArray_MinScalarType(PyArrayObject *arr) -{ - int is_small_unsigned; - return PyArray_MinScalarType_internal(arr, &is_small_unsigned); -} /* * Provides an ordering for the dtype 'kind' character codes, to help @@ -1814,14 +1617,7 @@ PyArray_ResultType( all_descriptors[i] = descrs[i]; } - int at_least_one_scalar = 0; - int all_pyscalar = ndtypes == 0; for (npy_intp i=0, i_all=ndtypes; i < narrs; i++, i_all++) { - /* Array descr is also the correct "default" for scalars: */ - if (PyArray_NDIM(arrs[i]) == 0) { - at_least_one_scalar = 1; - } - /* * If the original was a Python scalar/literal, we use only the * corresponding abstract DType (and no descriptor) below. @@ -1831,10 +1627,6 @@ PyArray_ResultType( if (PyArray_FLAGS(arrs[i]) & NPY_ARRAY_WAS_PYTHON_INT) { /* This could even be an object dtype here for large ints */ all_DTypes[i_all] = &PyArray_PyLongDType; - if (PyArray_TYPE(arrs[i]) != NPY_LONG) { - /* Not a "normal" scalar, so we cannot avoid the legacy path */ - all_pyscalar = 0; - } } else if (PyArray_FLAGS(arrs[i]) & NPY_ARRAY_WAS_PYTHON_FLOAT) { all_DTypes[i_all] = &PyArray_PyFloatDType; @@ -1845,7 +1637,6 @@ PyArray_ResultType( else { all_descriptors[i_all] = PyArray_DTYPE(arrs[i]); all_DTypes[i_all] = NPY_DTYPE(all_descriptors[i_all]); - all_pyscalar = 0; } Py_INCREF(all_DTypes[i_all]); } @@ -1906,24 +1697,6 @@ PyArray_ResultType( } } - /* - * Unfortunately, when 0-D "scalar" arrays are involved and mixed, we *may* - * have to use the value-based logic. - * `PyArray_CheckLegacyResultType` may behave differently based on the - * current value of `npy_legacy_promotion`: - * 1. It does nothing (we use the "new" behavior) - * 2. It does nothing, but warns if there the result would differ. - * 3. It replaces the result based on the legacy value-based logic. - */ - if (at_least_one_scalar && !all_pyscalar && result->type_num < NPY_NTYPES_LEGACY) { - if (PyArray_CheckLegacyResultType( - &result, narrs, arrs, ndtypes, descrs) < 0) { - Py_DECREF(common_dtype); - Py_DECREF(result); - return NULL; - } - } - Py_DECREF(common_dtype); PyMem_Free(info_on_heap); return result; @@ -1936,152 +1709,13 @@ PyArray_ResultType( } -/* - * Produces the result type of a bunch of inputs, using the UFunc - * type promotion rules. Use this function when you have a set of - * input arrays, and need to determine an output array dtype. - * - * If all the inputs are scalars (have 0 dimensions) or the maximum "kind" - * of the scalars is greater than the maximum "kind" of the arrays, does - * a regular type promotion. - * - * Otherwise, does a type promotion on the MinScalarType - * of all the inputs. Data types passed directly are treated as array - * types. 
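With the value-based branch gone, PyArray_ResultType reduces every operand with a single common-DType rule. A toy model of that reduce-style promotion, assuming a totally ordered four-element kind lattice that is far simpler than NumPy's real promotion tables:

```c
#include <stdio.h>

/* Tiny hypothetical type lattice; NumPy's real rules are much richer. */
enum ty { BOOL = 0, INT = 1, FLOAT = 2, COMPLEX = 3 };

static enum ty
common_type(enum ty a, enum ty b)
{
    return a > b ? a : b;   /* promotion = join on a totally ordered chain */
}

int main(void)
{
    enum ty ops[] = { BOOL, INT, FLOAT };
    enum ty result = ops[0];
    for (int i = 1; i < 3; i++) {
        result = common_type(result, ops[i]);   /* pairwise reduce */
    }
    printf("result type id: %d\n", result);     /* 2 == FLOAT */
    return 0;
}
```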
- */ -NPY_NO_EXPORT int -PyArray_CheckLegacyResultType( - PyArray_Descr **new_result, - npy_intp narrs, PyArrayObject **arr, - npy_intp ndtypes, PyArray_Descr **dtypes) -{ - PyArray_Descr *ret = NULL; - int promotion_state = get_npy_promotion_state(); - if (promotion_state == NPY_USE_WEAK_PROMOTION) { - return 0; - } - if (promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN - && !npy_give_promotion_warnings()) { - return 0; - } - - npy_intp i; - - /* If there's just one type, results must match */ - if (narrs + ndtypes == 1) { - return 0; - } - - int use_min_scalar = should_use_min_scalar(narrs, arr, ndtypes, dtypes); - - /* Loop through all the types, promoting them */ - if (!use_min_scalar) { - - /* Build a single array of all the dtypes */ - PyArray_Descr **all_dtypes = PyArray_malloc( - sizeof(*all_dtypes) * (narrs + ndtypes)); - if (all_dtypes == NULL) { - PyErr_NoMemory(); - return -1; - } - for (i = 0; i < narrs; ++i) { - all_dtypes[i] = PyArray_DESCR(arr[i]); - } - for (i = 0; i < ndtypes; ++i) { - all_dtypes[narrs + i] = dtypes[i]; - } - ret = PyArray_PromoteTypeSequence(all_dtypes, narrs + ndtypes); - PyArray_free(all_dtypes); - } - else { - int ret_is_small_unsigned = 0; - - for (i = 0; i < narrs; ++i) { - int tmp_is_small_unsigned; - PyArray_Descr *tmp = PyArray_MinScalarType_internal( - arr[i], &tmp_is_small_unsigned); - if (tmp == NULL) { - Py_XDECREF(ret); - return -1; - } - /* Combine it with the existing type */ - if (ret == NULL) { - ret = tmp; - ret_is_small_unsigned = tmp_is_small_unsigned; - } - else { - PyArray_Descr *tmpret = promote_types( - tmp, ret, tmp_is_small_unsigned, ret_is_small_unsigned); - Py_DECREF(tmp); - Py_DECREF(ret); - ret = tmpret; - if (ret == NULL) { - return -1; - } - - ret_is_small_unsigned = tmp_is_small_unsigned && - ret_is_small_unsigned; - } - } - - for (i = 0; i < ndtypes; ++i) { - PyArray_Descr *tmp = dtypes[i]; - /* Combine it with the existing type */ - if (ret == NULL) { - ret = tmp; - Py_INCREF(ret); - } - else { - PyArray_Descr *tmpret = promote_types( - tmp, ret, 0, ret_is_small_unsigned); - Py_DECREF(ret); - ret = tmpret; - if (ret == NULL) { - return -1; - } - } - } - /* None of the above loops ran */ - if (ret == NULL) { - PyErr_SetString(PyExc_TypeError, - "no arrays or types available to calculate result type"); - } - } - - if (ret == NULL) { - return -1; - } - - int unchanged_result = PyArray_EquivTypes(*new_result, ret); - if (unchanged_result) { - Py_DECREF(ret); - return 0; - } - - if (promotion_state == NPY_USE_LEGACY_PROMOTION) { - Py_SETREF(*new_result, ret); - return 0; - } - - assert(promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN); - if (PyErr_WarnFormat(PyExc_UserWarning, 1, - "result dtype changed due to the removal of value-based " - "promotion from NumPy. Changed from %S to %S.", - ret, *new_result) < 0) { - Py_DECREF(ret); - return -1; - } - Py_DECREF(ret); - return 0; -} - /** * Promotion of descriptors (of arbitrary DType) to their correctly * promoted instances of the given DType. * I.e. the given DType could be a string, which then finds the correct * string length, given all `descrs`. * - * @param ndescrs number of descriptors to cast and find the common instance. + * @param ndescr number of descriptors to cast and find the common instance. * At least one must be passed in. * @param descrs The descriptors to work with. * @param DType The DType of the desired output descriptor. 
@@ -2386,7 +2020,7 @@ PyArray_ConvertToCommonType(PyObject *op, int *retn)
 * Private function to add a casting implementation by unwrapping a bound
 * array method.
 *
- * @param meth
+ * @param meth The array method to be unwrapped
 * @return 0 on success -1 on failure.
 */
NPY_NO_EXPORT int
@@ -2438,7 +2072,12 @@ PyArray_AddCastingImplementation(PyBoundArrayMethodObject *meth)
/**
 * Add a new casting implementation using a PyArrayMethod_Spec.
 *
- * @param spec
+ * Using this function outside of module initialization without holding a
+ * critical section on the castingimpls dict may lead to a race to fill the
+ * dict. Use PyArray_GetCastingImpl to lazily register casts at runtime
+ * safely.
+ *
+ * @param spec The specification to use as a source
 * @param private If private, allow slots not publicly exposed.
 * @return 0 on success -1 on failure
 */
@@ -2810,6 +2449,11 @@ cast_to_string_resolve_descriptors(
        return -1;
    }
    if (dtypes[1]->type_num == NPY_UNICODE) {
+        if (size > NPY_MAX_INT / 4) {
+            PyErr_Format(PyExc_TypeError,
+                "string of length %zd is too large to store inside array.", size);
+            return -1;
+        }
        size *= 4;
    }
diff --git a/numpy/_core/src/multiarray/convert_datatype.h b/numpy/_core/src/multiarray/convert_datatype.h
index f848ad3b4c8e..5dc6b4deacb6 100644
--- a/numpy/_core/src/multiarray/convert_datatype.h
+++ b/numpy/_core/src/multiarray/convert_datatype.h
@@ -9,19 +9,6 @@ extern "C" {
 
 extern NPY_NO_EXPORT npy_intp REQUIRED_STR_LEN[];
 
-#define NPY_USE_LEGACY_PROMOTION 0
-#define NPY_USE_WEAK_PROMOTION 1
-#define NPY_USE_WEAK_PROMOTION_AND_WARN 2
-
-NPY_NO_EXPORT int
-npy_give_promotion_warnings(void);
-
-NPY_NO_EXPORT PyObject *
-npy__get_promotion_state(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(arg));
-
-NPY_NO_EXPORT PyObject *
-npy__set_promotion_state(PyObject *NPY_UNUSED(mod), PyObject *arg);
-
 NPY_NO_EXPORT PyObject *
 PyArray_GetCastingImpl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to);
 
@@ -53,11 +40,6 @@ PyArray_ValidType(int type);
 NPY_NO_EXPORT int
 dtype_kind_to_ordering(char kind);
 
-/* Used by PyArray_CanCastArrayTo and in the legacy ufunc type resolution */
-NPY_NO_EXPORT npy_bool
-can_cast_scalar_to(PyArray_Descr *scal_type, char *scal_data,
-                   PyArray_Descr *to, NPY_CASTING casting);
-
 NPY_NO_EXPORT npy_bool
 can_cast_pyscalar_scalar_to(
         int flags, PyArray_Descr *to, NPY_CASTING casting);
@@ -133,12 +115,6 @@ simple_cast_resolve_descriptors(
 NPY_NO_EXPORT int
 PyArray_InitializeCasts(void);
 
-NPY_NO_EXPORT int
-get_npy_promotion_state();
-
-NPY_NO_EXPORT void
-set_npy_promotion_state(int new_promotion_state);
-
 #ifdef __cplusplus
 }
 #endif
diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c
index 5c1a78daf0c5..b6a935e419a6 100644
--- a/numpy/_core/src/multiarray/ctors.c
+++ b/numpy/_core/src/multiarray/ctors.c
@@ -1416,7 +1416,7 @@ _array_from_buffer_3118(PyObject *memoryview)
 *        * an object with an __array__ function.
 *
 * @param op The object to convert to an array
- * @param requested_type a requested dtype instance, may be NULL; The result
+ * @param requested_dtype a requested dtype instance, may be NULL; The result
 *        DType may be used, but is not enforced.
 * @param writeable whether the result must be writeable.
 * @param context Unused parameter, must be NULL (should be removed later).
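The new guard in cast_to_string_resolve_descriptors tests size > NPY_MAX_INT / 4 before multiplying, because size *= 4 itself can overflow and wrap to a small, seemingly valid itemsize. A minimal sketch of the idiom, with hypothetical names:

```c
#include <limits.h>
#include <stdio.h>

/* Reject a UCS-4 itemsize before scaling by 4; multiplying first could
 * overflow and wrap around to a small "valid" value. Illustrative helper. */
static int
scale_unicode_itemsize(long long nchars, long long *out_bytes)
{
    if (nchars > INT_MAX / 4) {   /* would exceed INT_MAX after *= 4 */
        return -1;
    }
    *out_bytes = nchars * 4;
    return 0;
}

int main(void)
{
    long long bytes;
    if (scale_unicode_itemsize(1LL << 40, &bytes) < 0) {
        printf("too large\n");
    }
    if (scale_unicode_itemsize(10, &bytes) == 0) {
        printf("itemsize = %lld\n", bytes);   /* 40 */
    }
    return 0;
}
```

The same check-before-scale pattern reappears below in _convert_from_tuple, _convert_from_str, and string_discover_descr_from_pyobject.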
@@ -1839,7 +1839,7 @@ PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr, else if (in_descr && !PyArray_ISNBO(in_descr->byteorder)) { PyArray_DESCR_REPLACE(in_descr); } - if (in_descr && in_descr->byteorder != NPY_IGNORE) { + if (in_descr && in_descr->byteorder != NPY_IGNORE && in_descr->byteorder != NPY_NATIVE) { in_descr->byteorder = NPY_NATIVE; } } @@ -2036,13 +2036,12 @@ PyArray_FromStructInterface(PyObject *input) PyObject *attr; char endian = NPY_NATBYTE; - attr = PyArray_LookupSpecial_OnInstance(input, npy_interned_str.array_struct); - if (attr == NULL) { - if (PyErr_Occurred()) { - return NULL; - } else { - return Py_NotImplemented; - } + if (PyArray_LookupSpecial_OnInstance( + input, npy_interned_str.array_struct, &attr) < 0) { + return NULL; + } + else if (attr == NULL) { + return Py_NotImplemented; } if (!PyCapsule_CheckExact(attr)) { if (PyType_Check(input) && PyObject_HasAttrString(attr, "__get__")) { @@ -2160,12 +2159,11 @@ PyArray_FromInterface(PyObject *origin) npy_intp dims[NPY_MAXDIMS], strides[NPY_MAXDIMS]; int dataflags = NPY_ARRAY_BEHAVED; - iface = PyArray_LookupSpecial_OnInstance(origin, npy_interned_str.array_interface); - - if (iface == NULL) { - if (PyErr_Occurred()) { - return NULL; - } + if (PyArray_LookupSpecial_OnInstance( + origin, npy_interned_str.array_interface, &iface) < 0) { + return NULL; + } + else if (iface == NULL) { return Py_NotImplemented; } if (!PyDict_Check(iface)) { @@ -2234,8 +2232,8 @@ PyArray_FromInterface(PyObject *origin) Py_SETREF(dtype, new_dtype); } } + Py_DECREF(descr); } - Py_DECREF(descr); } Py_CLEAR(attr); @@ -2478,7 +2476,10 @@ check_or_clear_and_warn_error_if_due_to_copy_kwarg(PyObject *kwnames) Py_XDECREF(traceback); if (DEPRECATE("__array__ implementation doesn't accept a copy keyword, " "so passing copy=False failed. __array__ must implement " - "'dtype' and 'copy' keyword arguments.") < 0) { + "'dtype' and 'copy' keyword arguments. " + "To learn more, see the migration guide " + "https://numpy.org/devdocs/numpy_2_0_migration_guide.html" + "#adapting-to-changes-in-the-copy-keyword") < 0) { return -1; } return 0; @@ -2515,11 +2516,11 @@ PyArray_FromArrayAttr_int(PyObject *op, PyArray_Descr *descr, int copy, PyObject *new; PyObject *array_meth; - array_meth = PyArray_LookupSpecial_OnInstance(op, npy_interned_str.array); - if (array_meth == NULL) { - if (PyErr_Occurred()) { - return NULL; - } + if (PyArray_LookupSpecial_OnInstance( + op, npy_interned_str.array, &array_meth) < 0) { + return NULL; + } + else if (array_meth == NULL) { return Py_NotImplemented; } diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index 474c048db6cf..42daa39cbfd1 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -2818,85 +2818,232 @@ convert_datetime_to_pyobject(npy_datetime dt, PyArray_DatetimeMetaData *meta) } /* - * Converts a timedelta into a PyObject *. + * We require that if d is a PyDateTime, then + * hash(numpy.datetime64(d)) == hash(d). + * Where possible, convert dt to a PyDateTime and hash it. * - * Not-a-time is returned as the string "NaT". - * For microseconds or coarser, returns a datetime.timedelta. - * For units finer than microseconds, returns an integer. + * NOTE: "equals" across PyDate, PyDateTime and np.datetime64 is not transitive: + * datetime.datetime(1970, 1, 1) == np.datetime64(0, 'us') + * np.datetime64(0, 'us') == np.datetime64(0, 'D') + * datetime.datetime(1970, 1, 1) != np.datetime64(0, 'D') # date, not datetime! 
+ * + * But: + * datetime.date(1970, 1, 1) == np.datetime64(0, 'D') + * + * For hash(datetime64(0, 'D')) we could return either PyDate.hash or PyDateTime.hash. + * We choose PyDateTime.hash to match datetime64(0, 'us') */ -NPY_NO_EXPORT PyObject * -convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta) +NPY_NO_EXPORT npy_hash_t +datetime_hash(PyArray_DatetimeMetaData *meta, npy_datetime dt) { - npy_timedelta value; - int days = 0, seconds = 0, useconds = 0; + PyObject *obj; + npy_hash_t res; + npy_datetimestruct dts; - /* - * Convert NaT (not-a-time) into None. - */ - if (td == NPY_DATETIME_NAT) { - Py_RETURN_NONE; + if (dt == NPY_DATETIME_NAT) { + return -1; /* should have been handled by caller */ } - /* - * If the type's precision is greater than microseconds, is - * Y/M/B (nonlinear units), or is generic units, return an int - */ - if (meta->base > NPY_FR_us || - meta->base == NPY_FR_Y || - meta->base == NPY_FR_M || - meta->base == NPY_FR_GENERIC) { - return PyLong_FromLongLong(td); + if (meta->base == NPY_FR_GENERIC) { + obj = PyLong_FromLongLong(dt); + } else { + if (NpyDatetime_ConvertDatetime64ToDatetimeStruct(meta, dt, &dts) < 0) { + return -1; + } + + if (dts.year < 1 || dts.year > 9999 + || dts.ps != 0 || dts.as != 0) { + /* NpyDatetime_ConvertDatetime64ToDatetimeStruct does memset, + * so this is safe from loose struct packing. */ + obj = PyBytes_FromStringAndSize((const char *)&dts, sizeof(dts)); + } else { + obj = PyDateTime_FromDateAndTime(dts.year, dts.month, dts.day, + dts.hour, dts.min, dts.sec, dts.us); + } + } + + if (obj == NULL) { + return -1; } - value = td; + res = PyObject_Hash(obj); + + Py_DECREF(obj); + + return res; +} + +static int +convert_timedelta_to_timedeltastruct(PyArray_DatetimeMetaData *meta, + npy_timedelta td, + npy_timedeltastruct *out) +{ + memset(out, 0, sizeof(npy_timedeltastruct)); /* Apply the unit multiplier (TODO: overflow treatment...) 
*/ - value *= meta->num; + td *= meta->num; /* Convert to days/seconds/useconds */ switch (meta->base) { case NPY_FR_W: - days = value * 7; + out->day = td * 7; break; case NPY_FR_D: - days = value; + out->day = td; break; case NPY_FR_h: - days = extract_unit_64(&value, 24ULL); - seconds = value*60*60; + out->day = extract_unit_64(&td, 24LL); + out->sec = (npy_int32)(td * 60*60); break; case NPY_FR_m: - days = extract_unit_64(&value, 60ULL*24); - seconds = value*60; + out->day = extract_unit_64(&td, 60LL*24); + out->sec = (npy_int32)(td * 60); break; case NPY_FR_s: - days = extract_unit_64(&value, 60ULL*60*24); - seconds = value; + out->day = extract_unit_64(&td, 60LL*60*24); + out->sec = (npy_int32)td; break; case NPY_FR_ms: - days = extract_unit_64(&value, 1000ULL*60*60*24); - seconds = extract_unit_64(&value, 1000ULL); - useconds = value*1000; + out->day = extract_unit_64(&td, 1000LL*60*60*24); + out->sec = (npy_int32)extract_unit_64(&td, 1000LL); + out->us = (npy_int32)(td * 1000LL); break; case NPY_FR_us: - days = extract_unit_64(&value, 1000ULL*1000*60*60*24); - seconds = extract_unit_64(&value, 1000ULL*1000); - useconds = value; + out->day = extract_unit_64(&td, 1000LL*1000*60*60*24); + out->sec = (npy_int32)extract_unit_64(&td, 1000LL*1000); + out->us = (npy_int32)td; break; - default: - // unreachable, handled by the `if` above - assert(NPY_FALSE); + case NPY_FR_ns: + out->day = extract_unit_64(&td, 1000LL*1000*1000*60*60*24); + out->sec = (npy_int32)extract_unit_64(&td, 1000LL*1000*1000); + out->us = (npy_int32)extract_unit_64(&td, 1000LL); + out->ps = (npy_int32)(td * 1000LL); + break; + case NPY_FR_ps: + out->day = extract_unit_64(&td, 1000LL*1000*1000*1000*60*60*24); + out->sec = (npy_int32)extract_unit_64(&td, 1000LL*1000*1000*1000); + out->us = (npy_int32)extract_unit_64(&td, 1000LL*1000); + out->ps = (npy_int32)td; + break; + case NPY_FR_fs: + out->sec = (npy_int32)extract_unit_64(&td, 1000LL*1000*1000*1000*1000); + out->us = (npy_int32)extract_unit_64(&td, 1000LL*1000*1000); + out->ps = (npy_int32)extract_unit_64(&td, 1000LL); + out->as = (npy_int32)(td * 1000LL); break; + case NPY_FR_as: + out->sec = (npy_int32)extract_unit_64(&td, 1000LL*1000*1000*1000*1000*1000); + out->us = (npy_int32)extract_unit_64(&td, 1000LL*1000*1000*1000); + out->ps = (npy_int32)extract_unit_64(&td, 1000LL*1000); + out->as = (npy_int32)td; + break; + default: + PyErr_SetString(PyExc_RuntimeError, + "NumPy timedelta metadata is corrupted with invalid " + "base unit"); + return -1; + } + + return 0; +} + +/* + * Converts a timedelta into a PyObject *. + * + * Not-a-time is returned as the string "NaT". + * For microseconds or coarser, returns a datetime.timedelta. + * For units finer than microseconds, returns an integer. + */ +NPY_NO_EXPORT PyObject * +convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta) +{ + npy_timedeltastruct tds; + + /* + * Convert NaT (not-a-time) into None. 
+ */ + if (td == NPY_DATETIME_NAT) { + Py_RETURN_NONE; + } + + /* + * If the type's precision is greater than microseconds, is + * Y/M/B (nonlinear units), or is generic units, return an int + */ + if (meta->base > NPY_FR_us || + meta->base == NPY_FR_Y || + meta->base == NPY_FR_M || + meta->base == NPY_FR_GENERIC) { + return PyLong_FromLongLong(td); + } + + if (convert_timedelta_to_timedeltastruct(meta, td, &tds) < 0) { + return NULL; } + /* * If it would overflow the datetime.timedelta days, return a raw int */ - if (days < -999999999 || days > 999999999) { + if (tds.day < -999999999 || tds.day > 999999999) { return PyLong_FromLongLong(td); } else { - return PyDelta_FromDSU(days, seconds, useconds); + return PyDelta_FromDSU(tds.day, tds.sec, tds.us); + } +} + +/* + * We require that if d is a PyDelta, then + * hash(numpy.timedelta64(d)) == hash(d). + * Where possible, convert dt to a PyDelta and hash it. + */ +NPY_NO_EXPORT npy_hash_t +timedelta_hash(PyArray_DatetimeMetaData *meta, npy_timedelta td) +{ + PyObject *obj; + npy_hash_t res; + npy_timedeltastruct tds; + + if (td == NPY_DATETIME_NAT) { + return -1; /* should have been handled by caller */ + } + + if (meta->base == NPY_FR_GENERIC) { + /* generic compares equal to *every* other base, so no single hash works. */ + PyErr_SetString(PyExc_ValueError, "Can't hash generic timedelta64"); + return -1; } + + /* Y and M can be converted to each other but not to other units */ + + if (meta->base == NPY_FR_Y) { + obj = PyLong_FromLongLong(td * 12); + } else if (meta->base == NPY_FR_M) { + obj = PyLong_FromLongLong(td); + } else { + if (convert_timedelta_to_timedeltastruct(meta, td, &tds) < 0) { + return -1; + } + + if (tds.day < -999999999 || tds.day > 999999999 + || tds.ps != 0 || tds.as != 0) { + /* convert_timedelta_to_timedeltastruct does memset, + * so this is safe from loose struct packing. */ + obj = PyBytes_FromStringAndSize((const char *)&tds, sizeof(tds)); + } else { + obj = PyDelta_FromDSU(tds.day, tds.sec, tds.us); + } + } + + if (obj == NULL) { + return -1; + } + + res = PyObject_Hash(obj); + + Py_DECREF(obj); + + return res; } /* diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index a47a71d39196..006a5504f728 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -29,6 +29,7 @@ #include "npy_buffer.h" #include "dtypemeta.h" #include "stringdtype/dtype.h" +#include "array_coercion.h" #ifndef PyDictProxy_Check #define PyDictProxy_Check(obj) (Py_TYPE(obj) == &PyDictProxy_Type) @@ -273,8 +274,16 @@ _convert_from_tuple(PyObject *obj, int align) if (PyDataType_ISUNSIZED(type)) { /* interpret next item as a typesize */ int itemsize = PyArray_PyIntAsInt(PyTuple_GET_ITEM(obj,1)); - - if (error_converting(itemsize)) { + if (type->type_num == NPY_UNICODE) { + if (itemsize > NPY_MAX_INT / 4) { + itemsize = -1; + } + else { + itemsize *= 4; + } + } + if (itemsize < 0) { + /* Error may or may not be set by PyIntAsInt. 
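The switch above repeatedly peels units off the timedelta via extract_unit_64, which floor-divides and leaves a non-negative remainder so that negative values decompose correctly. A small standalone model: the helper mirrors the semantics the code relies on, while the surrounding harness is illustrative only.

```c
#include <stdio.h>
#include <stdint.h>

/* Floor-divide *d by `unit`, leaving the remainder (always in [0, unit))
 * in *d and returning the quotient. */
static int64_t
extract_unit_64(int64_t *d, int64_t unit)
{
    int64_t div = *d / unit;   /* C division truncates toward zero... */
    int64_t mod = *d % unit;
    if (mod < 0) {             /* ...so fix up to floor semantics */
        mod += unit;
        div -= 1;
    }
    *d = mod;
    return div;
}

int main(void)
{
    /* Decompose -(1d 1h 1m 1s 1us) in microseconds, as NPY_FR_us does. */
    int64_t td = -90061000001LL;
    int64_t day = extract_unit_64(&td, 1000LL*1000*60*60*24);
    int64_t sec = extract_unit_64(&td, 1000LL*1000);
    /* prints day=-2 sec=82738 us=999999, i.e. a negative day count with
     * non-negative second/microsecond fields */
    printf("day=%lld sec=%lld us=%lld\n",
           (long long)day, (long long)sec, (long long)td);
    return 0;
}
```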
*/ PyErr_SetString(PyExc_ValueError, "invalid itemsize in generic type tuple"); Py_DECREF(type); @@ -284,12 +293,8 @@ _convert_from_tuple(PyObject *obj, int align) if (type == NULL) { return NULL; } - if (type->type_num == NPY_UNICODE) { - type->elsize = itemsize << 2; - } - else { - type->elsize = itemsize; - } + + type->elsize = itemsize; return type; } else if (type->metadata && (PyDict_Check(val) || PyDictProxy_Check(val))) { @@ -1409,7 +1414,8 @@ PyArray_DescrConverter2(PyObject *obj, PyArray_Descr **at) * TODO: This function should eventually receive a deprecation warning and * be removed. * - * @param descr + * @param descr descriptor to be checked + * @param DType pointer to the DType of the descriptor * @return 1 if this is not a concrete dtype instance 0 otherwise */ static int @@ -1441,9 +1447,9 @@ descr_is_legacy_parametric_instance(PyArray_Descr *descr, * both results can be NULL (if the input is). But it always sets the DType * when a descriptor is set. * - * @param dtype - * @param out_descr - * @param out_DType + * @param dtype Input descriptor to be converted + * @param out_descr Output descriptor + * @param out_DType DType of the output descriptor * @return 0 on success -1 on failure */ NPY_NO_EXPORT int @@ -1470,7 +1476,7 @@ PyArray_ExtractDTypeAndDescriptor(PyArray_Descr *dtype, * Converter function filling in an npy_dtype_info struct on success. * * @param obj representing a dtype instance (descriptor) or DType class. - * @param[out] npy_dtype_info filled with the DType class and dtype/descriptor + * @param[out] dt_info npy_dtype_info filled with the DType class and dtype/descriptor * instance. The class is always set while the instance may be NULL. * On error, both will be NULL. * @return 0 on failure and 1 on success (as a converter) @@ -1522,7 +1528,7 @@ PyArray_DTypeOrDescrConverterRequired(PyObject *obj, npy_dtype_info *dt_info) * NULL anyway). * * @param obj None or obj representing a dtype instance (descr) or DType class. - * @param[out] npy_dtype_info filled with the DType class and dtype/descriptor + * @param[out] dt_info filled with the DType class and dtype/descriptor * instance. If `obj` is None, is not modified. Otherwise the class * is always set while the instance may be NULL. * On error, both will be NULL. @@ -1599,6 +1605,10 @@ _convert_from_type(PyObject *obj) { return PyArray_DescrFromType(NPY_OBJECT); } else { + PyObject *DType = PyArray_DiscoverDTypeFromScalarType(typ); + if (DType != NULL) { + return PyArray_GetDefaultDescr((PyArray_DTypeMeta *)DType); + } PyArray_Descr *ret = _try_convert_from_dtype_attr(obj); if ((PyObject *)ret != Py_NotImplemented) { return ret; @@ -1855,7 +1865,10 @@ _convert_from_str(PyObject *obj, int align) */ case NPY_UNICODELTR: check_num = NPY_UNICODE; - elsize <<= 2; + if (elsize > (NPY_MAX_INT / 4)) { + goto fail; + } + elsize *= 4; break; case NPY_VOIDLTR: diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c index 51cb454b3a66..14fbc36c3bff 100644 --- a/numpy/_core/src/multiarray/dlpack.c +++ b/numpy/_core/src/multiarray/dlpack.c @@ -57,7 +57,7 @@ array_dlpack_deleter_unversioned(DLManagedTensor *self) /* - * Deleter for a DLPack capsule wrapping a DLManagedTensor(Versioed). + * Deleter for a DLPack capsule wrapping a DLManagedTensor(Versioned). 
 *
 * This is exactly as mandated by dlpack
 */
diff --git a/numpy/_core/src/multiarray/dragon4.c b/numpy/_core/src/multiarray/dragon4.c
index 7cd8afbed6d8..b936f4dc213e 100644
--- a/numpy/_core/src/multiarray/dragon4.c
+++ b/numpy/_core/src/multiarray/dragon4.c
@@ -1615,7 +1615,8 @@ typedef struct Dragon4_Options {
 *
 * See Dragon4_Options for description of remaining arguments.
 */
-static npy_uint32
+
+static npy_int32
 FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa,
                  npy_int32 exponent, char signbit, npy_uint32 mantissaBit,
                  npy_bool hasUnequalMargins, DigitMode digit_mode,
@@ -1646,7 +1647,7 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa,
         buffer[pos++] = '-';
         has_sign = 1;
     }
-
+
     numDigits = Dragon4(mantissa, exponent, mantissaBit, hasUnequalMargins,
                         digit_mode, cutoff_mode, precision, min_digits,
                         buffer + has_sign, maxPrintLen - has_sign,
@@ -1658,14 +1659,14 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa,
     /* if output has a whole number */
     if (printExponent >= 0) {
         /* leave the whole number at the start of the buffer */
-        numWholeDigits = printExponent+1;
+        numWholeDigits = printExponent+1;
         if (numDigits <= numWholeDigits) {
             npy_int32 count = numWholeDigits - numDigits;
             pos += numDigits;
 
-            /* don't overflow the buffer */
-            if (pos + count > maxPrintLen) {
-                count = maxPrintLen - pos;
+            if (count > maxPrintLen - pos) {
+                PyErr_SetString(PyExc_RuntimeError, "Float formatting result too large");
+                return -1;
             }
 
             /* add trailing zeros up to the decimal point */
@@ -1767,9 +1768,12 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa,
             pos < maxPrintLen) {
         /* add trailing zeros up to add_digits length */
         /* compute the number of trailing zeros needed */
+
         npy_int32 count = desiredFractionalDigits - numFractionDigits;
-        if (pos + count > maxPrintLen) {
-            count = maxPrintLen - pos;
+
+        if (count > maxPrintLen - pos) {
+            PyErr_SetString(PyExc_RuntimeError, "Float formatting result too large");
+            return -1;
         }
         numFractionDigits += count;
@@ -1802,7 +1806,7 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa,
     }
 
     /* add any whitespace padding to right side */
-    if (digits_right >= numFractionDigits) {
+    if (digits_right >= numFractionDigits) {
         npy_int32 count = digits_right - numFractionDigits;
 
         /* in trim_mode DptZeros, if right padding, add a space for the . */
@@ -1811,8 +1815,9 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa,
             buffer[pos++] = ' ';
         }
 
-        if (pos + count > maxPrintLen) {
-            count = maxPrintLen - pos;
+        if (count > maxPrintLen - pos) {
+            PyErr_SetString(PyExc_RuntimeError, "Float formatting result too large");
+            return -1;
         }
 
         for ( ; count > 0; count--) {
@@ -1823,14 +1828,16 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa,
     if (digits_left > numWholeDigits + has_sign) {
         npy_int32 shift = digits_left - (numWholeDigits + has_sign);
         npy_int32 count = pos;
-
-        if (count + shift > maxPrintLen) {
-            count = maxPrintLen - shift;
+
+        if (count > maxPrintLen - shift) {
+            PyErr_SetString(PyExc_RuntimeError, "Float formatting result too large");
+            return -1;
         }
 
         if (count > 0) {
             memmove(buffer + shift, buffer, count);
         }
+
         pos = shift + count;
         for ( ; shift > 0; shift--) {
             buffer[shift - 1] = ' ';
@@ -1860,7 +1867,7 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa,
 *
 * See Dragon4_Options for description of remaining arguments.
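Besides turning silent truncation into a hard error, one plausible reason the rewritten checks read count > maxPrintLen - pos rather than pos + count > maxPrintLen is overflow safety: with 0 <= pos <= maxPrintLen the subtraction stays in range, while the sum can overflow before the comparison runs. A contrived sketch:

```c
#include <limits.h>
#include <stdio.h>

int main(void)
{
    int pos = 10, maxPrintLen = 100;
    int count = INT_MAX - 5;                 /* pathologically large request */

    /* Rearranged form: safe, since maxPrintLen - pos stays in range. */
    if (count > maxPrintLen - pos) {
        printf("rejected safely\n");
    }
    /* The naive form would first evaluate pos + count, i.e. INT_MAX + 5:
     * signed overflow, which is undefined behavior in C. */
    return 0;
}
```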
*/ -static npy_uint32 +static npy_int32 FormatScientific (char *buffer, npy_uint32 bufferSize, BigInt *mantissa, npy_int32 exponent, char signbit, npy_uint32 mantissaBit, npy_bool hasUnequalMargins, DigitMode digit_mode, @@ -2158,7 +2165,7 @@ PrintInfNan(char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa, * Helper function that takes Dragon4 parameters and options and * calls Dragon4. */ -static npy_uint32 +static npy_int32 Format_floatbits(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, npy_int32 exponent, char signbit, npy_uint32 mantissaBit, npy_bool hasUnequalMargins, Dragon4_Options *opt) @@ -2187,7 +2194,7 @@ Format_floatbits(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, * exponent: 5 bits * mantissa: 10 bits */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IEEE_binary16( npy_half *value, Dragon4_Options *opt) { @@ -2274,7 +2281,7 @@ Dragon4_PrintFloat_IEEE_binary16( * exponent: 8 bits * mantissa: 23 bits */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IEEE_binary32( npy_float32 *value, Dragon4_Options *opt) @@ -2367,7 +2374,7 @@ Dragon4_PrintFloat_IEEE_binary32( * exponent: 11 bits * mantissa: 52 bits */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IEEE_binary64( npy_float64 *value, Dragon4_Options *opt) { @@ -2482,7 +2489,7 @@ typedef struct FloatVal128 { * intbit 1 bit, first u64 * mantissa: 63 bits, first u64 */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_Intel_extended( FloatVal128 value, Dragon4_Options *opt) { @@ -2580,7 +2587,7 @@ Dragon4_PrintFloat_Intel_extended( * system. But numpy defines NPY_FLOAT80, so if we come across it, assume it is * an Intel extended format. */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_Intel_extended80( npy_float80 *value, Dragon4_Options *opt) { @@ -2604,7 +2611,7 @@ Dragon4_PrintFloat_Intel_extended80( #ifdef HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE /* Intel's 80-bit IEEE extended precision format, 96-bit storage */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_Intel_extended96( npy_float96 *value, Dragon4_Options *opt) { @@ -2628,7 +2635,7 @@ Dragon4_PrintFloat_Intel_extended96( #ifdef HAVE_LDOUBLE_MOTOROLA_EXTENDED_12_BYTES_BE /* Motorola Big-endian equivalent of the Intel-extended 96 fp format */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_Motorola_extended96( npy_float96 *value, Dragon4_Options *opt) { @@ -2665,7 +2672,7 @@ typedef union FloatUnion128 #ifdef HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE /* Intel's 80-bit IEEE extended precision format, 128-bit storage */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_Intel_extended128( npy_float128 *value, Dragon4_Options *opt) { @@ -2694,7 +2701,7 @@ Dragon4_PrintFloat_Intel_extended128( * I am not sure if the arch also supports uint128, and C does not seem to * support int128 literals. So we use uint64 to do manipulation. */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IEEE_binary128( FloatVal128 val128, Dragon4_Options *opt) { @@ -2779,7 +2786,7 @@ Dragon4_PrintFloat_IEEE_binary128( } #if defined(HAVE_LDOUBLE_IEEE_QUAD_LE) -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IEEE_binary128_le( npy_float128 *value, Dragon4_Options *opt) { @@ -2799,7 +2806,7 @@ Dragon4_PrintFloat_IEEE_binary128_le( * This function is untested, very few, if any, architectures implement * big endian IEEE binary128 floating point. 
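Each of these Dragon4_PrintFloat_* helpers changes from npy_uint32 to npy_int32 for the same reason as FormatPositional and FormatScientific above: an unsigned return type has no representable error value, so the new return -1 paths would look to callers like an enormous valid length. A tiny demonstration:

```c
#include <stdio.h>
#include <stdint.h>

static uint32_t format_unsigned(void) { return (uint32_t)-1; }  /* "error" */
static int32_t  format_signed(void)   { return -1; }            /* error   */

int main(void)
{
    /* With an unsigned return, `ret < 0` can never be true, so the error
     * value is indistinguishable from a length of 4294967295: */
    printf("unsigned 'error' looks like a length: %u\n", format_unsigned());

    /* With a signed return the usual `< 0` check works: */
    if (format_signed() < 0) {
        printf("signed error detected\n");
    }
    return 0;
}
```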
*/ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IEEE_binary128_be( npy_float128 *value, Dragon4_Options *opt) { @@ -2854,7 +2861,7 @@ Dragon4_PrintFloat_IEEE_binary128_be( * https://gcc.gnu.org/wiki/Ieee128PowerPCA * https://www.ibm.com/support/knowledgecenter/en/ssw_aix_71/com.ibm.aix.genprogc/128bit_long_double_floating-point_datatype.htm */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IBM_double_double( npy_float128 *value, Dragon4_Options *opt) { @@ -3041,6 +3048,7 @@ Dragon4_PrintFloat_IBM_double_double( * which goes up to about 10^4932. The Dragon4_scratch struct provides a string * buffer of this size. */ + #define make_dragon4_typefuncs_inner(Type, npy_type, format) \ \ PyObject *\ diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 244b47250786..0b1b0fb39192 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -374,7 +374,7 @@ dtypemeta_initialize_struct_from_spec( * if the Py_TPFLAGS_HEAPTYPE flag is set (they are created from Python). * They are not for legacy DTypes or np.dtype itself. * - * @param self + * @param dtype_class Pointer to the Python type object * @return nonzero if the object is garbage collected */ static inline int @@ -494,12 +494,14 @@ string_discover_descr_from_pyobject( itemsize = PyUnicode_GetLength(obj); } if (itemsize != -1) { - if (cls->type_num == NPY_UNICODE) { - itemsize *= 4; - } - if (itemsize > NPY_MAX_INT) { + if (itemsize > NPY_MAX_INT || ( + cls->type_num == NPY_UNICODE && itemsize > NPY_MAX_INT / 4)) { PyErr_SetString(PyExc_TypeError, "string too large to store inside array."); + return NULL; + } + if (cls->type_num == NPY_UNICODE) { + itemsize *= 4; } PyArray_Descr *res = PyArray_DescrNewFromType(cls->type_num); if (res == NULL) { @@ -1250,28 +1252,34 @@ dtypemeta_wrap_legacy_descriptor( return -1; } } + else { + // ensure the within dtype cast is populated for legacy user dtypes + if (PyArray_GetCastingImpl(dtype_class, dtype_class) == NULL) { + return -1; + } + } return 0; } static PyObject * -dtypemeta_get_abstract(PyArray_DTypeMeta *self) { +dtypemeta_get_abstract(PyArray_DTypeMeta *self, void *NPY_UNUSED(ignored)) { return PyBool_FromLong(NPY_DT_is_abstract(self)); } static PyObject * -dtypemeta_get_legacy(PyArray_DTypeMeta *self) { +dtypemeta_get_legacy(PyArray_DTypeMeta *self, void *NPY_UNUSED(ignored)) { return PyBool_FromLong(NPY_DT_is_legacy(self)); } static PyObject * -dtypemeta_get_parametric(PyArray_DTypeMeta *self) { +dtypemeta_get_parametric(PyArray_DTypeMeta *self, void *NPY_UNUSED(ignored)) { return PyBool_FromLong(NPY_DT_is_parametric(self)); } static PyObject * -dtypemeta_get_is_numeric(PyArray_DTypeMeta *self) { +dtypemeta_get_is_numeric(PyArray_DTypeMeta *self, void *NPY_UNUSED(ignored)) { return PyBool_FromLong(NPY_DT_is_numeric(self)); } diff --git a/numpy/_core/src/multiarray/dtypemeta.h b/numpy/_core/src/multiarray/dtypemeta.h index 344b440b38e8..8b3abbeb1883 100644 --- a/numpy/_core/src/multiarray/dtypemeta.h +++ b/numpy/_core/src/multiarray/dtypemeta.h @@ -80,7 +80,7 @@ typedef struct { PyObject *castingimpls; /* - * Storage for `descr->f`, since we may need to allow some customizatoin + * Storage for `descr->f`, since we may need to allow some customization * here at least in a transition period and we need to set it on every * dtype instance for backward compatibility. 
(Keep this at end) */ @@ -285,6 +285,11 @@ PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) v, itemptr, arr); } +// Like PyArray_DESCR_REPLACE, but calls ensure_canonical instead of DescrNew +#define PyArray_DESCR_REPLACE_CANONICAL(descr) do { \ + PyArray_Descr *_new_ = NPY_DT_CALL_ensure_canonical(descr); \ + Py_XSETREF(descr, _new_); \ + } while(0) #endif /* NUMPY_CORE_SRC_MULTIARRAY_DTYPEMETA_H_ */ diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index 4d98ce0c350c..fbcc0f7b162c 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -785,21 +785,21 @@ static NPY_GCC_OPT_3 inline int npy_fastrepeat_impl( npy_intp n_outer, npy_intp n, npy_intp nel, npy_intp chunk, npy_bool broadcast, npy_intp* counts, char* new_data, char* old_data, - npy_intp elsize, NPY_cast_info cast_info, int needs_refcounting) + npy_intp elsize, NPY_cast_info *cast_info, int needs_custom_copy) { npy_intp i, j, k; for (i = 0; i < n_outer; i++) { for (j = 0; j < n; j++) { npy_intp tmp = broadcast ? counts[0] : counts[j]; for (k = 0; k < tmp; k++) { - if (!needs_refcounting) { + if (!needs_custom_copy) { memcpy(new_data, old_data, chunk); } else { char *data[2] = {old_data, new_data}; npy_intp strides[2] = {elsize, elsize}; - if (cast_info.func(&cast_info.context, data, &nel, - strides, cast_info.auxdata) < 0) { + if (cast_info->func(&cast_info->context, data, &nel, + strides, cast_info->auxdata) < 0) { return -1; } } @@ -811,48 +811,53 @@ npy_fastrepeat_impl( return 0; } + +/* + * Helper to allow the compiler to specialize for all direct element copy + * cases (e.g. all numerical dtypes). + */ static NPY_GCC_OPT_3 int npy_fastrepeat( npy_intp n_outer, npy_intp n, npy_intp nel, npy_intp chunk, npy_bool broadcast, npy_intp* counts, char* new_data, char* old_data, - npy_intp elsize, NPY_cast_info cast_info, int needs_refcounting) + npy_intp elsize, NPY_cast_info *cast_info, int needs_custom_copy) { - if (!needs_refcounting) { + if (!needs_custom_copy) { if (chunk == 1) { return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, - elsize, cast_info, needs_refcounting); + elsize, cast_info, needs_custom_copy); } if (chunk == 2) { return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, - elsize, cast_info, needs_refcounting); + elsize, cast_info, needs_custom_copy); } if (chunk == 4) { return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, - elsize, cast_info, needs_refcounting); + elsize, cast_info, needs_custom_copy); } if (chunk == 8) { return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, - elsize, cast_info, needs_refcounting); + elsize, cast_info, needs_custom_copy); } if (chunk == 16) { return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, - elsize, cast_info, needs_refcounting); + elsize, cast_info, needs_custom_copy); } if (chunk == 32) { return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, - elsize, cast_info, needs_refcounting); + elsize, cast_info, needs_custom_copy); } } return npy_fastrepeat_impl( n_outer, n, nel, chunk, broadcast, counts, new_data, old_data, elsize, - cast_info, needs_refcounting); + cast_info, needs_custom_copy); } @@ -872,7 +877,6 @@ PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) char *new_data, *old_data; NPY_cast_info cast_info; 
NPY_ARRAYMETHOD_FLAGS flags; - int needs_refcounting; repeats = (PyArrayObject *)PyArray_ContiguousFromAny(op, NPY_INTP, 0, 1); if (repeats == NULL) { @@ -897,7 +901,6 @@ PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) aop = (PyArrayObject *)ap; n = PyArray_DIM(aop, axis); NPY_cast_info_init(&cast_info); - needs_refcounting = PyDataType_REFCHK(PyArray_DESCR(aop)); if (!broadcast && PyArray_SIZE(repeats) != n) { PyErr_Format(PyExc_ValueError, @@ -919,16 +922,23 @@ PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) } } + /* Fill in dimensions of new array */ + npy_intp dims[NPY_MAXDIMS] = {0}; + + for (int i = 0; i < PyArray_NDIM(aop); i++) { + dims[i] = PyArray_DIMS(aop)[i]; + } + + dims[axis] = total; + /* Construct new array */ - PyArray_DIMS(aop)[axis] = total; Py_INCREF(PyArray_DESCR(aop)); ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(aop), PyArray_DESCR(aop), PyArray_NDIM(aop), - PyArray_DIMS(aop), + dims, NULL, NULL, 0, (PyObject *)aop); - PyArray_DIMS(aop)[axis] = n; if (ret == NULL) { goto fail; } @@ -947,16 +957,18 @@ PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) n_outer *= PyArray_DIMS(aop)[i]; } - if (needs_refcounting) { + int needs_custom_copy = 0; + if (PyDataType_REFCHK(PyArray_DESCR(ret))) { + needs_custom_copy = 1; if (PyArray_GetDTypeTransferFunction( - 1, elsize, elsize, PyArray_DESCR(aop), PyArray_DESCR(aop), 0, + 1, elsize, elsize, PyArray_DESCR(aop), PyArray_DESCR(ret), 0, &cast_info, &flags) < 0) { goto fail; } } if (npy_fastrepeat(n_outer, n, nel, chunk, broadcast, counts, new_data, - old_data, elsize, cast_info, needs_refcounting) < 0) { + old_data, elsize, &cast_info, needs_custom_copy) < 0) { goto fail; } @@ -2009,8 +2021,7 @@ PyArray_LexSort(PyObject *sort_keys, int axis) } rcode = argsort(its[j]->dataptr, (npy_intp *)rit->dataptr, N, mps[j]); - if (rcode < 0 || (PyDataType_REFCHK(PyArray_DESCR(mps[j])) - && PyErr_Occurred())) { + if (rcode < 0 || (object && PyErr_Occurred())) { goto fail; } PyArray_ITER_NEXT(its[j]); diff --git a/numpy/_core/src/multiarray/iterators.c b/numpy/_core/src/multiarray/iterators.c index 2806670d3e07..c3b6500f69d0 100644 --- a/numpy/_core/src/multiarray/iterators.c +++ b/numpy/_core/src/multiarray/iterators.c @@ -136,7 +136,6 @@ PyArray_RawIterBaseInit(PyArrayIterObject *it, PyArrayObject *ao) nd = PyArray_NDIM(ao); /* The legacy iterator only supports 32 dimensions */ assert(nd <= NPY_MAXDIMS_LEGACY_ITERS); - PyArray_UpdateFlags(ao, NPY_ARRAY_C_CONTIGUOUS); if (PyArray_ISCONTIGUOUS(ao)) { it->contiguous = 1; } diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index 4a6c1f093769..d11fbb7ff870 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -263,13 +263,13 @@ unpack_indices(PyObject *index, PyObject **result, npy_intp result_n) * * Checks everything but the bounds. 
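The chunk == 1/2/4/8/16/32 dispatch in npy_fastrepeat above exists so the compiler can clone npy_fastrepeat_impl with a compile-time-constant chunk and turn the inner memcpy into a fixed-size move. A self-contained sketch of the same specialization trick, all names hypothetical:

```c
#include <stdio.h>
#include <string.h>

/* Inlined helper: when `chunk` is a literal at the call site, the compiler
 * can replace memcpy with a single fixed-width store. */
static inline void
repeat_impl(char *dst, const char *src, size_t n, size_t chunk)
{
    for (size_t i = 0; i < n; i++) {
        memcpy(dst + i * chunk, src, chunk);
    }
}

/* Dispatch on the common itemsizes so each case specializes. */
static void
repeat(char *dst, const char *src, size_t n, size_t chunk)
{
    switch (chunk) {
        case 1:  repeat_impl(dst, src, n, 1); return;
        case 2:  repeat_impl(dst, src, n, 2); return;
        case 4:  repeat_impl(dst, src, n, 4); return;
        case 8:  repeat_impl(dst, src, n, 8); return;
        default: repeat_impl(dst, src, n, chunk); return;  /* generic path */
    }
}

int main(void)
{
    double x = 3.5, out[4];
    repeat((char *)out, (const char *)&x, 4, sizeof(double));
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
    return 0;
}
```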
* - * @param the array being indexed - * @param the index object - * @param index info struct being filled (size of NPY_MAXDIMS * 2 + 1) - * @param number of indices found - * @param dimension of the indexing result - * @param dimension of the fancy/advanced indices part - * @param whether to allow the boolean special case + * @param self the array being indexed + * @param index the index object + * @param indices index info struct being filled (size of NPY_MAXDIMS * 2 + 1) + * @param num number of indices found + * @param ndim dimension of the indexing result + * @param out_fancy_ndim dimension of the fancy/advanced indices part + * @param allow_boolean whether to allow the boolean special case * * @returns the index_type or -1 on failure and fills the number of indices. */ @@ -782,10 +782,10 @@ index_has_memory_overlap(PyArrayObject *self, * The caller must ensure that the index is a full integer * one. * - * @param Array being indexed - * @param result pointer - * @param parsed index information - * @param number of indices + * @param self Array being indexed + * @param ptr result pointer + * @param indices parsed index information + * @param index_num number of indices * * @return 0 on success -1 on failure */ @@ -814,11 +814,12 @@ get_item_pointer(PyArrayObject *self, char **ptr, * Ensure_array allows to fetch a safe subspace view for advanced * indexing. * - * @param Array being indexed - * @param resulting array (new reference) - * @param parsed index information - * @param number of indices - * @param Whether result should inherit the type from self + * @param self Array being indexed + * @param view Resulting array (new reference) + * @param indices parsed index information + * @param index_num number of indices + * @param ensure_array true if result should be a base class array, + * false if result should inherit type from self * * @return 0 on success -1 on failure */ @@ -1057,7 +1058,7 @@ array_boolean_subscript(PyArrayObject *self, ret = (PyArrayObject *)PyArray_NewFromDescrAndBase( Py_TYPE(self), ret_dtype, 1, &size, PyArray_STRIDES(ret), PyArray_BYTES(ret), - PyArray_FLAGS(self), (PyObject *)self, (PyObject *)tmp); + PyArray_FLAGS(ret), (PyObject *)self, (PyObject *)tmp); Py_DECREF(tmp); if (ret == NULL) { @@ -1667,7 +1668,7 @@ array_subscript(PyArrayObject *self, PyObject *op) if (PyArray_GetDTypeTransferFunction(1, itemsize, itemsize, - PyArray_DESCR(self), PyArray_DESCR(self), + PyArray_DESCR(self), PyArray_DESCR(mit->extra_op), 0, &cast_info, &transfer_flags) != NPY_SUCCEED) { goto finish; } @@ -2034,7 +2035,6 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) goto fail; } - int allocated_array = 0; if (tmp_arr == NULL) { /* Fill extra op, need to swap first */ tmp_arr = mit->extra_op; @@ -2048,7 +2048,11 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) if (PyArray_CopyObject(tmp_arr, op) < 0) { goto fail; } - allocated_array = 1; + /* + * In this branch we copy directly from a newly allocated array which + * may have a new descr: + */ + descr = PyArray_DESCR(tmp_arr); } if (PyArray_MapIterCheckIndices(mit) < 0) { @@ -2096,8 +2100,7 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) // for non-REFCHK user DTypes. See gh-27057 for the prior discussion about this. if (PyArray_GetDTypeTransferFunction( 1, itemsize, itemsize, - allocated_array ? 
PyArray_DESCR(mit->extra_op) : PyArray_DESCR(self), - PyArray_DESCR(self), + descr, PyArray_DESCR(self), 0, &cast_info, &transfer_flags) != NPY_SUCCEED) { goto fail; } @@ -2412,10 +2415,10 @@ PyArray_MapIterNext(PyArrayMapIterObject *mit) * * mit->dimensions: Broadcast dimension of the fancy indices and * the subspace iteration dimension. * - * @param MapIterObject - * @param The parsed indices object - * @param Number of indices - * @param The array that is being iterated + * @param mit pointer to the MapIterObject + * @param indices The parsed indices object + * @param index_num Number of indices + * @param arr The array that is being iterated * * @return 0 on success -1 on failure (broadcasting or too many fancy indices) */ diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 4a8e1ea4579e..7f5bd29809a3 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -888,28 +888,39 @@ array_finalizearray(PyArrayObject *self, PyObject *obj) } +/* + * Default `__array_wrap__` implementation. + * + * If `self` is not a base class, we always create a new view, even if + * `return_scalar` is set. This way we preserve the (presumably important) + * subclass information. + * If the type is a base class array, we honor `return_scalar` and call + * PyArray_Return to convert any array with ndim=0 to scalar. + * + * By default, do not return a scalar (because this was always the default). + */ static PyObject * array_wraparray(PyArrayObject *self, PyObject *args) { PyArrayObject *arr; - PyObject *obj; + PyObject *UNUSED = NULL; /* for the context argument */ + int return_scalar = 0; - if (PyTuple_Size(args) < 1) { - PyErr_SetString(PyExc_TypeError, - "only accepts 1 argument"); - return NULL; - } - obj = PyTuple_GET_ITEM(args, 0); - if (obj == NULL) { + if (!PyArg_ParseTuple(args, "O!|OO&:__array_wrap__", + &PyArray_Type, &arr, &UNUSED, + &PyArray_OptionalBoolConverter, &return_scalar)) { return NULL; } - if (!PyArray_Check(obj)) { - PyErr_SetString(PyExc_TypeError, - "can only be called with ndarray object"); - return NULL; + + if (return_scalar && Py_TYPE(self) == &PyArray_Type && PyArray_NDIM(arr) == 0) { + /* Strict scalar return here (but go via PyArray_Return anyway) */ + Py_INCREF(arr); + return PyArray_Return(arr); } - arr = (PyArrayObject *)obj; + /* + * Return an array, but should ensure it has the type of self + */ if (Py_TYPE(self) != Py_TYPE(arr)) { PyArray_Descr *dtype = PyArray_DESCR(arr); Py_INCREF(dtype); @@ -919,7 +930,7 @@ array_wraparray(PyArrayObject *self, PyObject *args) PyArray_NDIM(arr), PyArray_DIMS(arr), PyArray_STRIDES(arr), PyArray_DATA(arr), - PyArray_FLAGS(arr), (PyObject *)self, obj); + PyArray_FLAGS(arr), (PyObject *)self, (PyObject *)arr); } else { /* @@ -1120,7 +1131,14 @@ array_function(PyArrayObject *NPY_UNUSED(self), PyObject *c_args, PyObject *c_kw &func, &types, &args, &kwargs)) { return NULL; } - + if (!PyTuple_CheckExact(args)) { + PyErr_SetString(PyExc_TypeError, "args must be a tuple."); + return NULL; + } + if (!PyDict_CheckExact(kwargs)) { + PyErr_SetString(PyExc_TypeError, "kwargs must be a dict."); + return NULL; + } types = PySequence_Fast( types, "types argument to ndarray.__array_function__ must be iterable"); diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index e02743693212..d337a84e9baf 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -157,12 +157,13 
@@ PyArray_GetPriority(PyObject *obj, double default_) return NPY_SCALAR_PRIORITY; } - ret = PyArray_LookupSpecial_OnInstance(obj, npy_interned_str.array_priority); - if (ret == NULL) { - if (PyErr_Occurred()) { - /* TODO[gh-14801]: propagate crashes during attribute access? */ - PyErr_Clear(); - } + if (PyArray_LookupSpecial_OnInstance( + obj, npy_interned_str.array_priority, &ret) < 0) { + /* TODO[gh-14801]: propagate crashes during attribute access? */ + PyErr_Clear(); + return default_; + } + else if (ret == NULL) { return default_; } @@ -2703,13 +2704,13 @@ array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t len_ar } static int -einsum_sub_op_from_str(PyObject *args, PyObject **str_obj, char **subscripts, - PyArrayObject **op) +einsum_sub_op_from_str( + Py_ssize_t nargs, PyObject *const *args, + PyObject **str_obj, char **subscripts, PyArrayObject **op) { - int i, nop; + Py_ssize_t nop = nargs - 1; PyObject *subscripts_str; - nop = PyTuple_GET_SIZE(args) - 1; if (nop <= 0) { PyErr_SetString(PyExc_ValueError, "must specify the einstein sum subscripts string " @@ -2722,7 +2723,7 @@ einsum_sub_op_from_str(PyObject *args, PyObject **str_obj, char **subscripts, } /* Get the subscripts string */ - subscripts_str = PyTuple_GET_ITEM(args, 0); + subscripts_str = args[0]; if (PyUnicode_Check(subscripts_str)) { *str_obj = PyUnicode_AsASCIIString(subscripts_str); if (*str_obj == NULL) { @@ -2739,15 +2740,13 @@ einsum_sub_op_from_str(PyObject *args, PyObject **str_obj, char **subscripts, } /* Set the operands to NULL */ - for (i = 0; i < nop; ++i) { + for (Py_ssize_t i = 0; i < nop; ++i) { op[i] = NULL; } /* Get the operands */ - for (i = 0; i < nop; ++i) { - PyObject *obj = PyTuple_GET_ITEM(args, i+1); - - op[i] = (PyArrayObject *)PyArray_FROM_OF(obj, NPY_ARRAY_ENSUREARRAY); + for (Py_ssize_t i = 0; i < nop; ++i) { + op[i] = (PyArrayObject *)PyArray_FROM_OF(args[i+1], NPY_ARRAY_ENSUREARRAY); if (op[i] == NULL) { goto fail; } @@ -2756,7 +2755,7 @@ einsum_sub_op_from_str(PyObject *args, PyObject **str_obj, char **subscripts, return nop; fail: - for (i = 0; i < nop; ++i) { + for (Py_ssize_t i = 0; i < nop; ++i) { Py_XDECREF(op[i]); op[i] = NULL; } @@ -2860,13 +2859,12 @@ einsum_list_to_subscripts(PyObject *obj, char *subscripts, int subsize) * Returns -1 on error, number of operands placed in op otherwise. 
*/ static int -einsum_sub_op_from_lists(PyObject *args, - char *subscripts, int subsize, PyArrayObject **op) +einsum_sub_op_from_lists(Py_ssize_t nargs, PyObject *const *args, + char *subscripts, int subsize, PyArrayObject **op) { int subindex = 0; - npy_intp i, nop; - nop = PyTuple_Size(args)/2; + Py_ssize_t nop = nargs / 2; if (nop == 0) { PyErr_SetString(PyExc_ValueError, "must provide at least an " @@ -2879,15 +2877,12 @@ einsum_sub_op_from_lists(PyObject *args, } /* Set the operands to NULL */ - for (i = 0; i < nop; ++i) { + for (Py_ssize_t i = 0; i < nop; ++i) { op[i] = NULL; } /* Get the operands and build the subscript string */ - for (i = 0; i < nop; ++i) { - PyObject *obj = PyTuple_GET_ITEM(args, 2*i); - int n; - + for (Py_ssize_t i = 0; i < nop; ++i) { /* Comma between the subscripts for each operand */ if (i != 0) { subscripts[subindex++] = ','; @@ -2898,14 +2893,13 @@ einsum_sub_op_from_lists(PyObject *args, } } - op[i] = (PyArrayObject *)PyArray_FROM_OF(obj, NPY_ARRAY_ENSUREARRAY); + op[i] = (PyArrayObject *)PyArray_FROM_OF(args[2*i], NPY_ARRAY_ENSUREARRAY); if (op[i] == NULL) { goto fail; } - obj = PyTuple_GET_ITEM(args, 2*i+1); - n = einsum_list_to_subscripts(obj, subscripts+subindex, - subsize-subindex); + int n = einsum_list_to_subscripts( + args[2*i + 1], subscripts+subindex, subsize-subindex); if (n < 0) { goto fail; } @@ -2913,10 +2907,7 @@ einsum_sub_op_from_lists(PyObject *args, } /* Add the '->' to the string if provided */ - if (PyTuple_Size(args) == 2*nop+1) { - PyObject *obj; - int n; - + if (nargs == 2*nop+1) { if (subindex + 2 >= subsize) { PyErr_SetString(PyExc_ValueError, "subscripts list is too long"); @@ -2925,9 +2916,8 @@ einsum_sub_op_from_lists(PyObject *args, subscripts[subindex++] = '-'; subscripts[subindex++] = '>'; - obj = PyTuple_GET_ITEM(args, 2*nop); - n = einsum_list_to_subscripts(obj, subscripts+subindex, - subsize-subindex); + int n = einsum_list_to_subscripts( + args[2*nop], subscripts+subindex, subsize-subindex); if (n < 0) { goto fail; } @@ -2940,7 +2930,7 @@ einsum_sub_op_from_lists(PyObject *args, return nop; fail: - for (i = 0; i < nop; ++i) { + for (Py_ssize_t i = 0; i < nop; ++i) { Py_XDECREF(op[i]); op[i] = NULL; } @@ -2949,36 +2939,39 @@ einsum_sub_op_from_lists(PyObject *args, } static PyObject * -array_einsum(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) +array_einsum(PyObject *NPY_UNUSED(dummy), + PyObject *const *args, Py_ssize_t nargsf, PyObject *kwnames) { char *subscripts = NULL, subscripts_buffer[256]; PyObject *str_obj = NULL, *str_key_obj = NULL; - PyObject *arg0; - int i, nop; + int nop; PyArrayObject *op[NPY_MAXARGS]; NPY_ORDER order = NPY_KEEPORDER; NPY_CASTING casting = NPY_SAFE_CASTING; + PyObject *out_obj = NULL; PyArrayObject *out = NULL; PyArray_Descr *dtype = NULL; PyObject *ret = NULL; + NPY_PREPARE_ARGPARSER; + + Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); - if (PyTuple_GET_SIZE(args) < 1) { + if (nargs < 1) { PyErr_SetString(PyExc_ValueError, "must specify the einstein sum subscripts string " "and at least one operand, or at least one operand " "and its corresponding subscripts list"); return NULL; } - arg0 = PyTuple_GET_ITEM(args, 0); /* einsum('i,j', a, b), einsum('i,j->ij', a, b) */ - if (PyBytes_Check(arg0) || PyUnicode_Check(arg0)) { - nop = einsum_sub_op_from_str(args, &str_obj, &subscripts, op); + if (PyBytes_Check(args[0]) || PyUnicode_Check(args[0])) { + nop = einsum_sub_op_from_str(nargs, args, &str_obj, &subscripts, op); } /* einsum(a, [0], b, [1]), einsum(a, [0], b, [1], [0,1]) */ 
else { - nop = einsum_sub_op_from_lists(args, subscripts_buffer, - sizeof(subscripts_buffer), op); + nop = einsum_sub_op_from_lists(nargs, args, subscripts_buffer, + sizeof(subscripts_buffer), op); subscripts = subscripts_buffer; } if (nop <= 0) { @@ -2986,63 +2979,26 @@ array_einsum(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) } /* Get the keyword arguments */ - if (kwds != NULL) { - PyObject *key, *value; - Py_ssize_t pos = 0; - while (PyDict_Next(kwds, &pos, &key, &value)) { - char *str = NULL; - - Py_XDECREF(str_key_obj); - str_key_obj = PyUnicode_AsASCIIString(key); - if (str_key_obj != NULL) { - key = str_key_obj; - } - - str = PyBytes_AsString(key); - - if (str == NULL) { - PyErr_Clear(); - PyErr_SetString(PyExc_TypeError, "invalid keyword"); - goto finish; - } - - if (strcmp(str,"out") == 0) { - if (PyArray_Check(value)) { - out = (PyArrayObject *)value; - } - else { - PyErr_SetString(PyExc_TypeError, - "keyword parameter out must be an " - "array for einsum"); - goto finish; - } - } - else if (strcmp(str,"order") == 0) { - if (!PyArray_OrderConverter(value, &order)) { - goto finish; - } - } - else if (strcmp(str,"casting") == 0) { - if (!PyArray_CastingConverter(value, &casting)) { - goto finish; - } - } - else if (strcmp(str,"dtype") == 0) { - if (!PyArray_DescrConverter2(value, &dtype)) { - goto finish; - } - } - else { - PyErr_Format(PyExc_TypeError, - "'%s' is an invalid keyword for einsum", - str); - goto finish; - } + if (kwnames != NULL) { + if (npy_parse_arguments("einsum", args+nargs, 0, kwnames, + "$out", NULL, &out_obj, + "$order", &PyArray_OrderConverter, &order, + "$casting", &PyArray_CastingConverter, &casting, + "$dtype", &PyArray_DescrConverter2, &dtype, + NULL, NULL, NULL) < 0) { + goto finish; + } + if (out_obj != NULL && !PyArray_Check(out_obj)) { + PyErr_SetString(PyExc_TypeError, + "keyword parameter out must be an " + "array for einsum"); + goto finish; } + out = (PyArrayObject *)out_obj; } ret = (PyObject *)PyArray_EinsteinSum(subscripts, nop, op, dtype, - order, casting, out); + order, casting, out); /* If no output was supplied, possibly convert to a scalar */ if (ret != NULL && out == NULL) { @@ -3050,7 +3006,7 @@ array_einsum(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) } finish: - for (i = 0; i < nop; ++i) { + for (Py_ssize_t i = 0; i < nop; ++i) { Py_XDECREF(op[i]); } Py_XDECREF(dtype); @@ -3548,30 +3504,18 @@ array_can_cast_safely(PyObject *NPY_UNUSED(self), * TODO: `PyArray_IsScalar` should not be required for new dtypes. * weak-promotion branch is in practice identical to dtype one. 
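Both einsum calling conventions that the refactored parsing handles can be exercised from Python; a small sketch (`np.einsum` forwards to `c_einsum` on the default `optimize=False` path, and the keyword-only `out`/`order`/`casting`/`dtype` handling matches the `$`-prefixed names in `npy_parse_arguments`):

    import numpy as np

    a = np.arange(6.0).reshape(2, 3)
    b = np.arange(3.0)

    # Subscript-string form, parsed by einsum_sub_op_from_str:
    r1 = np.einsum('ij,j->i', a, b)

    # Interleaved operand/subscript-list form, parsed by einsum_sub_op_from_lists:
    r2 = np.einsum(a, [0, 1], b, [1], [0])

    assert np.array_equal(r1, r2)

    # out/order/casting/dtype are accepted as keywords only:
    out = np.empty(2)
    np.einsum('ij,j->i', a, b, out=out, casting='safe')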
*/ - if (get_npy_promotion_state() == NPY_USE_WEAK_PROMOTION) { - PyObject *descr = PyObject_GetAttr(from_obj, npy_interned_str.dtype); - if (descr == NULL) { - goto finish; - } - if (!PyArray_DescrCheck(descr)) { - Py_DECREF(descr); - PyErr_SetString(PyExc_TypeError, - "numpy_scalar.dtype did not return a dtype instance."); - goto finish; - } - ret = PyArray_CanCastTypeTo((PyArray_Descr *)descr, d2, casting); - Py_DECREF(descr); + PyObject *descr = PyObject_GetAttr(from_obj, npy_interned_str.dtype); + if (descr == NULL) { + goto finish; } - else { - /* need to convert to object to consider old value-based logic */ - PyArrayObject *arr; - arr = (PyArrayObject *)PyArray_FROM_O(from_obj); - if (arr == NULL) { - goto finish; - } - ret = PyArray_CanCastArrayTo(arr, d2, casting); - Py_DECREF(arr); + if (!PyArray_DescrCheck(descr)) { + Py_DECREF(descr); + PyErr_SetString(PyExc_TypeError, + "numpy_scalar.dtype did not return a dtype instance."); + goto finish; } + ret = PyArray_CanCastTypeTo((PyArray_Descr *)descr, d2, casting); + Py_DECREF(descr); } else if (PyArray_IsPythonNumber(from_obj)) { PyErr_SetString(PyExc_TypeError, @@ -4517,7 +4461,7 @@ static struct PyMethodDef array_module_methods[] = { METH_FASTCALL, NULL}, {"c_einsum", (PyCFunction)array_einsum, - METH_VARARGS|METH_KEYWORDS, NULL}, + METH_FASTCALL|METH_KEYWORDS, NULL}, {"correlate", (PyCFunction)array_correlate, METH_FASTCALL | METH_KEYWORDS, NULL}, @@ -4626,14 +4570,6 @@ static struct PyMethodDef array_module_methods[] = { {"get_handler_version", (PyCFunction) get_handler_version, METH_VARARGS, NULL}, - {"_get_promotion_state", - (PyCFunction)npy__get_promotion_state, - METH_NOARGS, "Get the current NEP 50 promotion state."}, - {"_set_promotion_state", - (PyCFunction)npy__set_promotion_state, - METH_O, "Set the NEP 50 promotion state. This is not thread-safe.\n" - "The optional warnings can be safely silenced using the \n" - "`np._no_nep50_warning()` context manager."}, {"_set_numpy_warn_if_no_mem_policy", (PyCFunction)_set_numpy_warn_if_no_mem_policy, METH_O, "Change the warn if no mem policy flag for testing."}, @@ -5097,6 +5033,24 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { goto err; } + /* + * Initialize the default PyDataMem_Handler capsule singleton. + */ + PyDataMem_DefaultHandler = PyCapsule_New( + &default_handler, MEM_HANDLER_CAPSULE_NAME, NULL); + if (PyDataMem_DefaultHandler == NULL) { + goto err; + } + + /* + * Initialize the context-local current handler + * with the default PyDataMem_Handler capsule. + */ + current_handler = PyContextVar_New("current_allocator", PyDataMem_DefaultHandler); + if (current_handler == NULL) { + goto err; + } + if (initumath(m) != 0) { goto err; } @@ -5131,7 +5085,7 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { * init_string_dtype() but that needs to happen after * the legacy dtypemeta classes are available. */ - + if (npy_cache_import_runtime( "numpy.dtypes", "_add_dtype_helper", &npy_runtime_imports._add_dtype_helper) == -1) { @@ -5145,23 +5099,6 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { } PyDict_SetItemString(d, "StringDType", (PyObject *)&PyArray_StringDType); - /* - * Initialize the default PyDataMem_Handler capsule singleton. - */ - PyDataMem_DefaultHandler = PyCapsule_New( - &default_handler, MEM_HANDLER_CAPSULE_NAME, NULL); - if (PyDataMem_DefaultHandler == NULL) { - goto err; - } - /* - * Initialize the context-local current handler - * with the default PyDataMem_Handler capsule. 
- */ - current_handler = PyContextVar_New("current_allocator", PyDataMem_DefaultHandler); - if (current_handler == NULL) { - goto err; - } - // initialize static reference to a zero-like array npy_static_pydata.zero_pyint_like_arr = PyArray_ZEROS( 0, NULL, NPY_DEFAULT_INT, NPY_FALSE); diff --git a/numpy/_core/src/multiarray/nditer_constr.c b/numpy/_core/src/multiarray/nditer_constr.c index 427dd3d876bc..ab1a540cb283 100644 --- a/numpy/_core/src/multiarray/nditer_constr.c +++ b/numpy/_core/src/multiarray/nditer_constr.c @@ -1315,8 +1315,10 @@ npyiter_check_casting(int nop, PyArrayObject **op, printf("\n"); #endif /* If the types aren't equivalent, a cast is necessary */ - if (op[iop] != NULL && !PyArray_EquivTypes(PyArray_DESCR(op[iop]), - op_dtype[iop])) { + npy_intp view_offset = NPY_MIN_INTP; + if (op[iop] != NULL && !(PyArray_SafeCast( + PyArray_DESCR(op[iop]), op_dtype[iop], &view_offset, + NPY_NO_CASTING, 1) && view_offset == 0)) { /* Check read (op -> temp) casting */ if ((op_itflags[iop] & NPY_OP_ITFLAG_READ) && !PyArray_CanCastArrayTo(op[iop], diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c index 38f8b5ebd119..2cc6ea72c26e 100644 --- a/numpy/_core/src/multiarray/npy_static_data.c +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -63,6 +63,7 @@ intern_strings(void) INTERN_STRING(__dlpack__, "__dlpack__"); INTERN_STRING(pyvals_name, "UFUNC_PYVALS_NAME"); INTERN_STRING(legacy, "legacy"); + INTERN_STRING(__doc__, "__doc__"); return 0; } diff --git a/numpy/_core/src/multiarray/npy_static_data.h b/numpy/_core/src/multiarray/npy_static_data.h index 277e4be1eaff..d6ee4a8dc54d 100644 --- a/numpy/_core/src/multiarray/npy_static_data.h +++ b/numpy/_core/src/multiarray/npy_static_data.h @@ -1,6 +1,10 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_STATIC_DATA_H_ #define NUMPY_CORE_SRC_MULTIARRAY_STATIC_DATA_H_ +#ifdef __cplusplus +extern "C" { +#endif + NPY_NO_EXPORT int initialize_static_globals(void); @@ -38,6 +42,7 @@ typedef struct npy_interned_str_struct { PyObject *__dlpack__; PyObject *pyvals_name; PyObject *legacy; + PyObject *__doc__; } npy_interned_str_struct; /* @@ -167,4 +172,8 @@ NPY_VISIBILITY_HIDDEN extern npy_interned_str_struct npy_interned_str; NPY_VISIBILITY_HIDDEN extern npy_static_pydata_struct npy_static_pydata; NPY_VISIBILITY_HIDDEN extern npy_static_cdata_struct npy_static_cdata; +#ifdef __cplusplus +} +#endif + #endif // NUMPY_CORE_SRC_MULTIARRAY_STATIC_DATA_H_ diff --git a/numpy/_core/src/multiarray/number.c b/numpy/_core/src/multiarray/number.c index f537d2b68e41..e6c04c1c9a9c 100644 --- a/numpy/_core/src/multiarray/number.c +++ b/numpy/_core/src/multiarray/number.c @@ -755,13 +755,10 @@ _array_nonzero(PyArrayObject *mp) return res; } else if (n == 0) { - /* 2017-09-25, 1.14 */ - if (DEPRECATE("The truth value of an empty array is ambiguous. " - "Returning False, but in future this will result in an error. " - "Use `array.size > 0` to check that an array is not empty.") < 0) { - return -1; - } - return 0; + PyErr_SetString(PyExc_ValueError, + "The truth value of an empty array is ambiguous. 
" + "Use `array.size > 0` to check that an array is not empty."); + return -1; } else { PyErr_SetString(PyExc_ValueError, diff --git a/numpy/_core/src/multiarray/refcount.c b/numpy/_core/src/multiarray/refcount.c index 0da40cbdc60e..571b50372684 100644 --- a/numpy/_core/src/multiarray/refcount.c +++ b/numpy/_core/src/multiarray/refcount.c @@ -83,14 +83,16 @@ PyArray_ZeroContiguousBuffer( if (get_fill_zero_loop( NULL, descr, aligned, descr->elsize, &(zero_info.func), &(zero_info.auxdata), &flags_unused) < 0) { - goto fail; + return -1; } } else { + assert(zero_info.func == NULL); + } + if (zero_info.func == NULL) { /* the multiply here should never overflow, since we already checked if the new array size doesn't overflow */ memset(data, 0, size*stride); - NPY_traverse_info_xfree(&zero_info); return 0; } @@ -98,10 +100,6 @@ PyArray_ZeroContiguousBuffer( NULL, descr, data, size, stride, zero_info.auxdata); NPY_traverse_info_xfree(&zero_info); return res; - - fail: - NPY_traverse_info_xfree(&zero_info); - return -1; } diff --git a/numpy/_core/src/multiarray/scalarapi.c b/numpy/_core/src/multiarray/scalarapi.c index 9ca83d8a57f5..84638bc640be 100644 --- a/numpy/_core/src/multiarray/scalarapi.c +++ b/numpy/_core/src/multiarray/scalarapi.c @@ -390,6 +390,12 @@ PyArray_DescrFromTypeObject(PyObject *type) Py_INCREF(type); return (PyArray_Descr *)new; } + + PyObject *DType = PyArray_DiscoverDTypeFromScalarType((PyTypeObject *)type); + if (DType != NULL) { + return PyArray_GetDefaultDescr((PyArray_DTypeMeta *)DType); + } + return _descr_from_subtype(type); } diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 689e16730cc0..36919a492472 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -25,6 +25,7 @@ #include "numpyos.h" #include "can_cast_table.h" #include "common.h" +#include "conversion_utils.h" #include "flagsobject.h" #include "scalartypes.h" #include "_datetime.h" @@ -209,15 +210,15 @@ find_binary_operation_path( * our ufuncs without preventing recursion. * It may be nice to avoid double lookup in `BINOP_GIVE_UP_IF_NEEDED`. */ - PyObject *attr = PyArray_LookupSpecial(other, npy_interned_str.array_ufunc); - if (attr != NULL) { + PyObject *attr; + if (PyArray_LookupSpecial(other, npy_interned_str.array_ufunc, &attr) < 0) { + PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */ + } + else if (attr != NULL) { Py_DECREF(attr); *other_op = Py_NewRef(other); return 0; } - else if (PyErr_Occurred()) { - PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */ - } /* * Now check `other`. We want to know whether it is an object scalar @@ -253,7 +254,7 @@ find_binary_operation_path( * * However, NumPy (historically) made this often work magically because * ufuncs for object dtype end up casting to object with `.item()`. This in - * turn ofthen returns a Python type (e.g. float for float32, float64)! + * turn often returns a Python type (e.g. float for float32, float64)! * Retrying then succeeds. So if (and only if) `self.item()` returns a new * type, we can safely attempt the operation (again) with that. */ @@ -2035,29 +2036,33 @@ gentype_getarray(PyObject *scalar, PyObject *args) return ret; } -static char doc_sc_wraparray[] = "sc.__array_wrap__(obj) return scalar from array"; +static char doc_sc_wraparray[] = "__array_wrap__ implementation for scalar types"; +/* + * __array_wrap__ for scalars, returning a scalar if possible. 
+ * (note that NumPy itself may never call this). + */ static PyObject * gentype_wraparray(PyObject *NPY_UNUSED(scalar), PyObject *args) { - PyObject *obj; PyArrayObject *arr; + PyObject *UNUSED = NULL; /* for the context argument */ + /* return_scalar should be passed; since this is a scalar, default to returning a scalar */ + int return_scalar = 1; - if (PyTuple_Size(args) < 1) { - PyErr_SetString(PyExc_TypeError, - "only accepts 1 argument."); + if (!PyArg_ParseTuple(args, "O!|OO&:__array_wrap__", + &PyArray_Type, &arr, &UNUSED, + &PyArray_OptionalBoolConverter, &return_scalar)) { return NULL; } - obj = PyTuple_GET_ITEM(args, 0); - if (!PyArray_Check(obj)) { - PyErr_SetString(PyExc_TypeError, - "can only be called with ndarray object"); - return NULL; - } - arr = (PyArrayObject *)obj; - return PyArray_Scalar(PyArray_DATA(arr), - PyArray_DESCR(arr), (PyObject *)arr); + Py_INCREF(arr); + if (!return_scalar) { + return (PyObject *)arr; + } + else { + return PyArray_Return(arr); + } } /* @@ -3904,45 +3909,26 @@ static inline npy_hash_t * #lname = datetime, timedelta# * #name = Datetime, Timedelta# */ -#if NPY_SIZEOF_HASH_T==NPY_SIZEOF_DATETIME static npy_hash_t @lname@_arrtype_hash(PyObject *obj) { - npy_hash_t x = (npy_hash_t)(PyArrayScalar_VAL(obj, @name@)); - if (x == -1) { - x = -2; - } - return x; -} -#elif NPY_SIZEOF_LONGLONG==NPY_SIZEOF_DATETIME -static npy_hash_t -@lname@_arrtype_hash(PyObject *obj) -{ - npy_hash_t y; - npy_longlong x = (PyArrayScalar_VAL(obj, @name@)); + PyArray_DatetimeMetaData *meta; + PyArray_Descr *dtype; + npy_@lname@ val = PyArrayScalar_VAL(obj, @name@); - if ((x <= LONG_MAX)) { - y = (npy_hash_t) x; + if (val == NPY_DATETIME_NAT) { + /* Use identity, similar to NaN */ + return PyBaseObject_Type.tp_hash(obj); } - else { - union Mask { - long hashvals[2]; - npy_longlong v; - } both; - both.v = x; - y = both.hashvals[0] + (1000003)*both.hashvals[1]; - } - if (y == -1) { - y = -2; - } - return y; + dtype = PyArray_DescrFromScalar(obj); + meta = get_datetime_metadata_from_dtype(dtype); + + return @lname@_hash(meta, val); } -#endif /**end repeat**/ - /* Wrong thing to do for longdouble, but....*/ /**begin repeat diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 81a846bf6d96..cb8265dd3d7a 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -270,6 +270,15 @@ as_pystring(PyObject *scalar, int coerce) "string coercion is disabled."); return NULL; } + else if (scalar_type == &PyBytes_Type) { + // assume UTF-8 encoding + char *buffer; + Py_ssize_t length; + if (PyBytes_AsStringAndSize(scalar, &buffer, &length) < 0) { + return NULL; + } + return PyUnicode_FromStringAndSize(buffer, length); + } else { // attempt to coerce to str scalar = PyObject_Str(scalar); diff --git a/numpy/_core/src/multiarray/textreading/rows.c b/numpy/_core/src/multiarray/textreading/rows.c index 8fe13d0d3532..c459fa826e53 100644 --- a/numpy/_core/src/multiarray/textreading/rows.c +++ b/numpy/_core/src/multiarray/textreading/rows.c @@ -6,6 +6,7 @@ #define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "numpy/npy_3kcompat.h" +#include "npy_pycompat.h" #include "alloc.h" #include @@ -58,13 +59,16 @@ create_conv_funcs( PyObject *key, *value; Py_ssize_t pos = 0; + int error = 0; + Py_BEGIN_CRITICAL_SECTION(converters); while (PyDict_Next(converters, &pos, &key, &value)) { Py_ssize_t column = PyNumber_AsSsize_t(key, PyExc_IndexError); if (column == -1 &&
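The metadata-aware hash above is what lets equal datetimes hash equally, as Python requires of equal objects; a behavior sketch (the cross-unit hash equality is the intended outcome of this change, assumed here):

    import numpy as np

    a = np.datetime64('2011-03-15', 'D')
    b = np.datetime64('2011-03-15T00:00')   # minute unit, same instant
    assert a == b
    assert hash(a) == hash(b)   # now consistent with equality

    # NaT never compares equal to anything (like NaN), so it falls back to
    # identity-based hashing:
    print(hash(np.datetime64('NaT')))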
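The new `PyBytes_Type` branch in `as_pystring` makes bytes coercible to StringDType by decoding them as UTF-8; a sketch:

    import numpy as np
    from numpy.dtypes import StringDType

    # b"caf\xc3\xa9" is the UTF-8 encoding of "café":
    arr = np.array([b"caf\xc3\xa9", b"ascii"], dtype=StringDType())
    print(arr[0])   # café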
PyErr_Occurred()) { PyErr_Format(PyExc_TypeError, "keys of the converters dictionary must be integers; " "got %.100R", key); - goto error; + error = 1; + break; } if (usecols != NULL) { /* @@ -92,7 +96,8 @@ create_conv_funcs( PyErr_Format(PyExc_ValueError, "converter specified for column %zd, which is invalid " "for the number of fields %zd.", column, num_fields); - goto error; + error = 1; + break; } if (column < 0) { column += num_fields; @@ -102,11 +107,18 @@ create_conv_funcs( PyErr_Format(PyExc_TypeError, "values of the converters dictionary must be callable, " "but the value associated with key %R is not", key); - goto error; + error = 1; + break; } Py_INCREF(value); conv_funcs[column] = value; } + Py_END_CRITICAL_SECTION(); + + if (error) { + goto error; + } + return conv_funcs; error: @@ -142,8 +154,6 @@ create_conv_funcs( * @param out_descr The dtype used for allocating a new array. This is not * used if `data_array` is provided. Note that the actual dtype of the * returned array can differ for strings. - * @param num_cols Pointer in which the actual (discovered) number of columns - * is returned. This is only relevant if `homogeneous` is true. * @param homogeneous Whether the datatype of the array is not homogeneous, * i.e. not structured. In this case the number of columns has to be * discovered an the returned array will be 2-dimensional rather than diff --git a/numpy/_core/src/multiarray/usertypes.c b/numpy/_core/src/multiarray/usertypes.c index 8d90f5cc968f..445f7ad7fe67 100644 --- a/numpy/_core/src/multiarray/usertypes.c +++ b/numpy/_core/src/multiarray/usertypes.c @@ -618,8 +618,8 @@ legacy_userdtype_common_dtype_function( * used for legacy user-dtypes, but for example numeric to/from datetime * casts were only defined that way as well. * - * @param from - * @param to + * @param from Source DType + * @param to Destination DType * @param casting If `NPY_NO_CASTING` will check the legacy registered cast, * otherwise uses the provided cast. */ diff --git a/numpy/_core/src/npysort/highway_qsort.dispatch.cpp b/numpy/_core/src/npysort/highway_qsort.dispatch.cpp index 194a81e2d7e9..645055537d87 100644 --- a/numpy/_core/src/npysort/highway_qsort.dispatch.cpp +++ b/numpy/_core/src/npysort/highway_qsort.dispatch.cpp @@ -1,6 +1,4 @@ #include "highway_qsort.hpp" -#define VQSORT_ONLY_STATIC 1 -#include "hwy/contrib/sort/vqsort-inl.h" #if VQSORT_ENABLED diff --git a/numpy/_core/src/npysort/highway_qsort.hpp b/numpy/_core/src/npysort/highway_qsort.hpp index ba3fe4920594..77cd9f085943 100644 --- a/numpy/_core/src/npysort/highway_qsort.hpp +++ b/numpy/_core/src/npysort/highway_qsort.hpp @@ -1,18 +1,13 @@ #ifndef NUMPY_SRC_COMMON_NPYSORT_HWY_SIMD_QSORT_HPP #define NUMPY_SRC_COMMON_NPYSORT_HWY_SIMD_QSORT_HPP +#define VQSORT_ONLY_STATIC 1 #include "hwy/highway.h" +#include "hwy/contrib/sort/vqsort-inl.h" #include "common.hpp" -// This replicates VQSORT_ENABLED from hwy/contrib/sort/shared-inl.h -// without checking the scalar target as this is not built within the dynamic -// dispatched sources. 
-#if (HWY_COMPILER_MSVC && !HWY_IS_DEBUG_BUILD) || \ - (HWY_ARCH_ARM_V7 && HWY_IS_DEBUG_BUILD) || \ - (HWY_ARCH_ARM_A64 && HWY_COMPILER_GCC_ACTUAL && HWY_IS_ASAN) || \ - (HWY_ARCH_ARM_A64 && HWY_COMPILER_CLANG && \ - (HWY_IS_HWASAN || HWY_IS_MSAN || HWY_IS_TSAN || HWY_IS_ASAN)) +#if !VQSORT_COMPILER_COMPATIBLE #define NPY_DISABLE_HIGHWAY_SORT #endif diff --git a/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp b/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp index d069cb6373d0..d151de2b5e62 100644 --- a/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp +++ b/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp @@ -1,6 +1,4 @@ #include "highway_qsort.hpp" -#define VQSORT_ONLY_STATIC 1 -#include "hwy/contrib/sort/vqsort-inl.h" #include "quicksort.hpp" diff --git a/numpy/_core/src/npysort/selection.cpp b/numpy/_core/src/npysort/selection.cpp index 225e932ac122..5106cab7757c 100644 --- a/numpy/_core/src/npysort/selection.cpp +++ b/numpy/_core/src/npysort/selection.cpp @@ -258,7 +258,7 @@ unguarded_partition_(type *v, npy_intp *tosort, const type pivot, npy_intp *ll, /* * select median of median of blocks of 5 * if used as partition pivot it splits the range into at least 30%/70% - * allowing linear time worstcase quickselect + * allowing linear time worst-case quickselect */ template static npy_intp diff --git a/numpy/_core/src/umath/_struct_ufunc_tests.c b/numpy/_core/src/umath/_struct_ufunc_tests.c index 90b7e147d50a..8edbdc00b6f3 100644 --- a/numpy/_core/src/umath/_struct_ufunc_tests.c +++ b/numpy/_core/src/umath/_struct_ufunc_tests.c @@ -133,8 +133,8 @@ PyMODINIT_FUNC PyInit__struct_ufunc_tests(void) import_umath(); add_triplet = PyUFunc_FromFuncAndData(NULL, NULL, NULL, 0, 2, 1, - PyUFunc_None, "add_triplet", - "add_triplet_docstring", 0); + PyUFunc_None, "add_triplet", + NULL, 0); dtype_dict = Py_BuildValue("[(s, s), (s, s), (s, s)]", "f0", "u8", "f1", "u8", "f2", "u8"); diff --git a/numpy/_core/src/umath/dispatching.c b/numpy/_core/src/umath/dispatching.cpp similarity index 94% rename from numpy/_core/src/umath/dispatching.c rename to numpy/_core/src/umath/dispatching.cpp index 110e2f40ab32..87b16cc176b8 100644 --- a/numpy/_core/src/umath/dispatching.c +++ b/numpy/_core/src/umath/dispatching.cpp @@ -38,6 +38,9 @@ #define _MULTIARRAYMODULE #define _UMATHMODULE +#include +#include + #define PY_SSIZE_T_CLEAN #include #include @@ -213,7 +216,7 @@ PyUFunc_AddLoopFromSpec_int(PyObject *ufunc, PyArrayMethod_Spec *spec, int priv) * both are `(f4, f4, f8)`. The cache would need to store also which * output was provided by `dtype=`/`signature=`. * - * @param ufunc + * @param ufunc The universal function to be resolved * @param op_dtypes The DTypes that are either passed in (defined by an * operand) or defined by the `signature` as also passed in as * `fixed_DTypes`. @@ -504,8 +507,9 @@ call_promoter_and_recurse(PyUFuncObject *ufunc, PyObject *info, PyObject *promoter = PyTuple_GET_ITEM(info, 1); if (PyCapsule_CheckExact(promoter)) { /* We could also go the other way and wrap up the python function... */ - PyArrayMethod_PromoterFunction *promoter_function = PyCapsule_GetPointer( - promoter, "numpy._ufunc_promoter"); + PyArrayMethod_PromoterFunction *promoter_function = + (PyArrayMethod_PromoterFunction *)PyCapsule_GetPointer( + promoter, "numpy._ufunc_promoter"); if (promoter_function == NULL) { return NULL; } @@ -770,8 +774,9 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, * 2. Check all registered loops/promoters to find the best match. * 3. 
Fall back to the legacy implementation if no match was found. */ - PyObject *info = PyArrayIdentityHash_GetItem(ufunc->_dispatch_cache, - (PyObject **)op_dtypes); + PyObject *info = PyArrayIdentityHash_GetItem( + (PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes); if (info != NULL && PyObject_TypeCheck( PyTuple_GET_ITEM(info, 1), &PyArrayMethod_Type)) { /* Found the ArrayMethod and NOT a promoter: return it */ @@ -793,8 +798,9 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, * Found the ArrayMethod and NOT promoter. Before returning it * add it to the cache for faster lookup in the future. */ - if (PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache, - (PyObject **)op_dtypes, info, 0) < 0) { + if (PyArrayIdentityHash_SetItem( + (PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes, info, 0) < 0) { return NULL; } return info; @@ -815,8 +821,9 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, } else if (info != NULL) { /* Add result to the cache using the original types: */ - if (PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache, - (PyObject **)op_dtypes, info, 0) < 0) { + if (PyArrayIdentityHash_SetItem( + (PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes, info, 0) < 0) { return NULL; } return info; @@ -882,13 +889,51 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, } /* Add this to the cache using the original types: */ - if (cacheable && PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache, - (PyObject **)op_dtypes, info, 0) < 0) { + if (cacheable && PyArrayIdentityHash_SetItem( + (PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes, info, 0) < 0) { return NULL; } return info; } +#ifdef Py_GIL_DISABLED +/* + * Fast path for promote_and_get_info_and_ufuncimpl. + * Acquires a read lock to check for a cache hit and then + * only acquires a write lock on a cache miss to fill the cache + */ +static inline PyObject * +promote_and_get_info_and_ufuncimpl_with_locking( + PyUFuncObject *ufunc, + PyArrayObject *const ops[], + PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *op_dtypes[], + npy_bool legacy_promotion_is_possible) +{ + std::shared_mutex *mutex = ((std::shared_mutex *)((PyArrayIdentityHash *)ufunc->_dispatch_cache)->mutex); + mutex->lock_shared(); + PyObject *info = PyArrayIdentityHash_GetItem( + (PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes); + mutex->unlock_shared(); + + if (info != NULL && PyObject_TypeCheck( + PyTuple_GET_ITEM(info, 1), &PyArrayMethod_Type)) { + /* Found the ArrayMethod and NOT a promoter: return it */ + return info; + } + + // cache miss, need to acquire a write lock and recursively calculate the + // correct dispatch resolution + mutex->lock(); + info = promote_and_get_info_and_ufuncimpl(ufunc, + ops, signature, op_dtypes, legacy_promotion_is_possible); + mutex->unlock(); + + return info; +} +#endif /** * The central entry-point for the promotion and dispatching machinery. 
@@ -941,6 +986,8 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, { int nin = ufunc->nin, nargs = ufunc->nargs; npy_bool legacy_promotion_is_possible = NPY_TRUE; + PyObject *all_dtypes = NULL; + PyArrayMethodObject *method = NULL; /* * Get the actual DTypes we operate with by setting op_dtypes[i] from @@ -976,55 +1023,20 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, } } - int current_promotion_state = get_npy_promotion_state(); - - if (force_legacy_promotion && legacy_promotion_is_possible - && current_promotion_state == NPY_USE_LEGACY_PROMOTION - && (ufunc->ntypes != 0 || ufunc->userloops != NULL)) { - /* - * We must use legacy promotion for value-based logic. Call the old - * resolver once up-front to get the "actual" loop dtypes. - * After this (additional) promotion, we can even use normal caching. - */ - int cacheable = 1; /* unused, as we modify the original `op_dtypes` */ - if (legacy_promote_using_legacy_type_resolver(ufunc, - ops, signature, op_dtypes, &cacheable, NPY_FALSE) < 0) { - goto handle_error; - } - } - - /* Pause warnings and always use "new" path */ - set_npy_promotion_state(NPY_USE_WEAK_PROMOTION); +#ifdef Py_GIL_DISABLED + PyObject *info = promote_and_get_info_and_ufuncimpl_with_locking(ufunc, + ops, signature, op_dtypes, legacy_promotion_is_possible); +#else PyObject *info = promote_and_get_info_and_ufuncimpl(ufunc, ops, signature, op_dtypes, legacy_promotion_is_possible); - set_npy_promotion_state(current_promotion_state); +#endif if (info == NULL) { goto handle_error; } - PyArrayMethodObject *method = (PyArrayMethodObject *)PyTuple_GET_ITEM(info, 1); - PyObject *all_dtypes = PyTuple_GET_ITEM(info, 0); - - /* If necessary, check if the old result would have been different */ - if (NPY_UNLIKELY(current_promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN) - && (force_legacy_promotion || promoting_pyscalars) - && npy_give_promotion_warnings()) { - PyArray_DTypeMeta *check_dtypes[NPY_MAXARGS]; - for (int i = 0; i < nargs; i++) { - check_dtypes[i] = (PyArray_DTypeMeta *)PyTuple_GET_ITEM( - all_dtypes, i); - } - /* Before calling to the legacy promotion, pretend that is the state: */ - set_npy_promotion_state(NPY_USE_LEGACY_PROMOTION); - int res = legacy_promote_using_legacy_type_resolver(ufunc, - ops, signature, check_dtypes, NULL, NPY_TRUE); - /* Reset the promotion state: */ - set_npy_promotion_state(NPY_USE_WEAK_PROMOTION_AND_WARN); - if (res < 0) { - goto handle_error; - } - } + method = (PyArrayMethodObject *)PyTuple_GET_ITEM(info, 1); + all_dtypes = PyTuple_GET_ITEM(info, 0); /* * In certain cases (only the logical ufuncs really), the loop we found may @@ -1255,7 +1267,7 @@ install_logical_ufunc_promoter(PyObject *ufunc) if (dtype_tuple == NULL) { return -1; } - PyObject *promoter = PyCapsule_New(&logical_ufunc_promoter, + PyObject *promoter = PyCapsule_New((void *)&logical_ufunc_promoter, "numpy._ufunc_promoter", NULL); if (promoter == NULL) { Py_DECREF(dtype_tuple); diff --git a/numpy/_core/src/umath/dispatching.h b/numpy/_core/src/umath/dispatching.h index 9bb5fbd9b013..95bcb32bf0ce 100644 --- a/numpy/_core/src/umath/dispatching.h +++ b/numpy/_core/src/umath/dispatching.h @@ -43,6 +43,10 @@ object_only_ufunc_promoter(PyObject *ufunc, NPY_NO_EXPORT int install_logical_ufunc_promoter(PyObject *ufunc); +NPY_NO_EXPORT PyObject * +get_info_no_cast(PyUFuncObject *ufunc, PyArray_DTypeMeta *op_dtype, + int ndtypes); + #ifdef __cplusplus } #endif diff --git a/numpy/_core/src/umath/fast_loop_macros.h b/numpy/_core/src/umath/fast_loop_macros.h index 
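A sketch of the workload the new read/write locking protects: on free-threaded (Py_GIL_DISABLED) builds many threads can consult the dispatch cache concurrently, while on regular builds the GIL already serializes access (the script itself runs on either build):

    import threading
    import numpy as np

    def worker():
        # Repeated identical ufunc calls hit the dispatch cache after the
        # first resolution; only a cache miss takes the exclusive (write) lock.
        x = np.arange(1000, dtype=np.float32)
        for _ in range(100):
            np.add(x, x)

    threads = [threading.Thread(target=worker) for _ in range(8)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()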
ab830d52e9ab..0b8cc1f0a5ac 100644 --- a/numpy/_core/src/umath/fast_loop_macros.h +++ b/numpy/_core/src/umath/fast_loop_macros.h @@ -315,7 +315,7 @@ abs_ptrdiff(char *a, char *b) /* * stride is equal to element size and input and destination are equal or * don't overlap within one register. The check of the steps against - * esize also quarantees that steps are >= 0. + * esize also guarantees that steps are >= 0. */ #define IS_BLOCKABLE_UNARY(esize, vsize) \ (steps[0] == (esize) && steps[0] == steps[1] && \ diff --git a/numpy/_core/src/umath/legacy_array_method.c b/numpy/_core/src/umath/legacy_array_method.c index 9592df0e1366..705262fedd38 100644 --- a/numpy/_core/src/umath/legacy_array_method.c +++ b/numpy/_core/src/umath/legacy_array_method.c @@ -311,7 +311,7 @@ get_initial_from_ufunc( } } else if (context->descriptors[0]->type_num == NPY_OBJECT - && !reduction_is_empty) { + && !reduction_is_empty) { /* Allows `sum([object()])` to work, but use 0 when empty. */ Py_DECREF(identity_obj); return 0; @@ -323,13 +323,6 @@ get_initial_from_ufunc( return -1; } - if (PyTypeNum_ISNUMBER(context->descriptors[0]->type_num)) { - /* For numbers we can cache to avoid going via Python ints */ - memcpy(context->method->legacy_initial, initial, - context->descriptors[0]->elsize); - context->method->get_reduction_initial = ©_cached_initial; - } - /* Reduction can use the initial value */ return 1; } @@ -427,11 +420,47 @@ PyArray_NewLegacyWrappingArrayMethod(PyUFuncObject *ufunc, }; PyBoundArrayMethodObject *bound_res = PyArrayMethod_FromSpec_int(&spec, 1); + if (bound_res == NULL) { return NULL; } PyArrayMethodObject *res = bound_res->method; + + // set cached initial value for numeric reductions to avoid creating + // a python int in every reduction + if (PyTypeNum_ISNUMBER(bound_res->dtypes[0]->type_num) && + ufunc->nin == 2 && ufunc->nout == 1) { + + PyArray_Descr *descrs[3]; + + for (int i = 0; i < 3; i++) { + // only dealing with numeric legacy dtypes so this should always be + // valid + descrs[i] = bound_res->dtypes[i]->singleton; + } + + PyArrayMethod_Context context = { + (PyObject *)ufunc, + bound_res->method, + descrs, + }; + + int ret = get_initial_from_ufunc(&context, 0, context.method->legacy_initial); + + if (ret < 0) { + Py_DECREF(bound_res); + return NULL; + } + + // only use the cached initial value if it's valid + if (ret > 0) { + context.method->get_reduction_initial = ©_cached_initial; + } + } + + Py_INCREF(res); Py_DECREF(bound_res); + return res; } diff --git a/numpy/_core/src/umath/loops_arithm_fp.dispatch.c.src b/numpy/_core/src/umath/loops_arithm_fp.dispatch.c.src index 21e01c115a7d..9defead3075d 100644 --- a/numpy/_core/src/umath/loops_arithm_fp.dispatch.c.src +++ b/numpy/_core/src/umath/loops_arithm_fp.dispatch.c.src @@ -22,7 +22,7 @@ * current one kinda slow and it can be optimized by * at least avoiding the division and keep sqrt. * - Vectorize reductions - * - Add support for ASIMD/VCMLA through universal intrinics. + * - Add support for ASIMD/VCMLA through universal intrinsics. 
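The cached initial value is the reduction identity, which is also what an empty reduction returns; caching it once per wrapped loop avoids materializing a Python int on every reduction call. A behavior sketch:

    import numpy as np

    print(np.add.reduce(np.array([], dtype=np.float64)))      # 0.0 (identity of +)
    print(np.multiply.reduce(np.array([], dtype=np.int32)))   # 1   (identity of *)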
*/ //############################################################################### diff --git a/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src b/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src index a4acc4437b1b..190ea6b8be72 100644 --- a/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src +++ b/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src @@ -1074,10 +1074,14 @@ AVX512F_log_DOUBLE(npy_double * op, _mm512_mask_storeu_pd(op, load_mask, res); } - /* call glibc's log func when x around 1.0f */ + /* call glibc's log func when x around 1.0f. */ if (glibc_mask != 0) { double NPY_DECL_ALIGNED(64) ip_fback[8]; - _mm512_store_pd(ip_fback, x_in); + /* Using a mask_store_pd instead of store_pd to prevent a fatal + * compiler optimization bug. See + * https://github.com/numpy/numpy/issues/27745#issuecomment-2498684564 + * for details.*/ + _mm512_mask_store_pd(ip_fback, avx512_get_full_load_mask_pd(), x_in); for (int ii = 0; ii < 8; ++ii, glibc_mask >>= 1) { if (glibc_mask & 0x01) { diff --git a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp index 1bc6ecfb14d6..ae696db4cd4a 100644 --- a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp +++ b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp @@ -1,7 +1,8 @@ -#include "simd/simd.h" -#include "loops_utils.h" -#include "loops.h" #include "fast_loop_macros.h" +#include "loops.h" +#include "loops_utils.h" + +#include "simd/simd.h" #include namespace hn = hwy::HWY_NAMESPACE; @@ -31,8 +32,7 @@ namespace hn = hwy::HWY_NAMESPACE; */ #if NPY_SIMD_FMA3 // native support -typedef enum -{ +typedef enum { SIMD_COMPUTE_SIN, SIMD_COMPUTE_COS } SIMD_TRIG_OP; @@ -44,7 +44,8 @@ using vec_s32 = hn::Vec; using opmask_t = hn::Mask; HWY_INLINE HWY_ATTR vec_f32 -simd_range_reduction_f32(vec_f32& x, vec_f32& y, const vec_f32& c1, const vec_f32& c2, const vec_f32& c3) +simd_range_reduction_f32(vec_f32 &x, vec_f32 &y, const vec_f32 &c1, + const vec_f32 &c2, const vec_f32 &c3) { vec_f32 reduced_x = hn::MulAdd(y, c1, x); reduced_x = hn::MulAdd(y, c2, reduced_x); @@ -53,7 +54,7 @@ simd_range_reduction_f32(vec_f32& x, vec_f32& y, const vec_f32& c1, const vec_f3 } HWY_INLINE HWY_ATTR vec_f32 -simd_cosine_poly_f32(vec_f32& x2) +simd_cosine_poly_f32(vec_f32 &x2) { const vec_f32 invf8 = hn::Set(f32, 0x1.98e616p-16f); const vec_f32 invf6 = hn::Set(f32, -0x1.6c06dcp-10f); @@ -73,7 +74,7 @@ simd_cosine_poly_f32(vec_f32& x2) * Polynomial approximation based on unpublished work by T. 
Myklebust */ HWY_INLINE HWY_ATTR vec_f32 -simd_sine_poly_f32(vec_f32& x, vec_f32& x2) +simd_sine_poly_f32(vec_f32 &x, vec_f32 &x2) { const vec_f32 invf9 = hn::Set(f32, 0x1.7d3bbcp-19f); const vec_f32 invf7 = hn::Set(f32, -0x1.a06bbap-13f); @@ -94,8 +95,8 @@ simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, { // Load up frequently used constants const vec_f32 zerosf = hn::Zero(f32); - const vec_s32 ones = hn::Set(s32, 1); - const vec_s32 twos = hn::Set(s32, 2); + const vec_s32 ones = hn::Set(s32, 1); + const vec_s32 twos = hn::Set(s32, 2); const vec_f32 two_over_pi = hn::Set(f32, 0x1.45f306p-1f); const vec_f32 codyw_pio2_highf = hn::Set(f32, -0x1.921fb0p+00f); const vec_f32 codyw_pio2_medf = hn::Set(f32, -0x1.5110b4p-22f); @@ -112,11 +113,12 @@ simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, const vec_s32 src_index = hn::Mul(hn::Iota(s32, 0), hn::Set(s32, ssrc)); const vec_s32 dst_index = hn::Mul(hn::Iota(s32, 0), hn::Set(s32, sdst)); - for (; len > 0; len -= lanes, src += ssrc*lanes, dst += sdst*lanes) { + for (; len > 0; len -= lanes, src += ssrc * lanes, dst += sdst * lanes) { vec_f32 x_in; if (ssrc == 1) { x_in = hn::LoadN(f32, src, len); - } else { + } + else { x_in = hn::GatherIndexN(f32, src, src_index, len); } opmask_t nnan_mask = hn::Not(hn::IsNaN(x_in)); @@ -129,7 +131,8 @@ simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, * these numbers */ if (!hn::AllFalse(f32, simd_mask)) { - vec_f32 x = hn::IfThenElse(hn::And(nnan_mask, simd_mask), x_in, zerosf); + vec_f32 x = hn::IfThenElse(hn::And(nnan_mask, simd_mask), x_in, + zerosf); vec_f32 quadrant = hn::Mul(x, two_over_pi); // round to nearest, -0.0f -> +0.0f, and |a| must be <= 0x1.0p+22 @@ -137,9 +140,9 @@ simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, quadrant = hn::Sub(quadrant, rint_cvt_magic); // Cody-Waite's range reduction algorithm - vec_f32 reduced_x = simd_range_reduction_f32( - x, quadrant, codyw_pio2_highf, codyw_pio2_medf, codyw_pio2_lowf - ); + vec_f32 reduced_x = + simd_range_reduction_f32(x, quadrant, codyw_pio2_highf, + codyw_pio2_medf, codyw_pio2_lowf); vec_f32 reduced_x2 = hn::Mul(reduced_x, reduced_x); // compute cosine and sine @@ -151,23 +154,36 @@ simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, iquadrant = hn::Add(iquadrant, ones); } // blend sin and cos based on the quadrant - opmask_t sine_mask = hn::RebindMask(f32, hn::Eq(hn::And(iquadrant, ones), hn::Zero(s32))); + opmask_t sine_mask = hn::RebindMask( + f32, hn::Eq(hn::And(iquadrant, ones), hn::Zero(s32))); cos = hn::IfThenElse(sine_mask, sin, cos); // multiply by -1 for appropriate elements - opmask_t negate_mask = hn::RebindMask(f32, hn::Eq(hn::And(iquadrant, twos), twos)); + opmask_t negate_mask = hn::RebindMask( + f32, hn::Eq(hn::And(iquadrant, twos), twos)); cos = hn::MaskedSubOr(cos, negate_mask, zerosf, cos); cos = hn::IfThenElse(nnan_mask, cos, hn::Set(f32, NPY_NANF)); if (sdst == 1) { hn::StoreN(cos, f32, dst, len); - } else { + } + else { hn::ScatterIndexN(cos, f32, dst, dst_index, len); } } if (!hn::AllTrue(f32, simd_mask)) { + static_assert(hn::MaxLanes(f32) <= 64, + "The following fallback is not applicable for " + "SIMD widths larger than 2048 bits, or for scalable " + "SIMD in general."); npy_uint64 simd_maski; - hn::StoreMaskBits(f32, simd_mask, (uint8_t*)&simd_maski); + hn::StoreMaskBits(f32, simd_mask, (uint8_t *)&simd_maski); +#if HWY_IS_BIG_ENDIAN + static_assert(hn::MaxLanes(f32) <= 8, + "This 
conversion is not supported for SIMD widths " + "larger than 256 bits."); + simd_maski = ((uint8_t *)&simd_maski)[0]; +#endif float NPY_DECL_ALIGNED(NPY_SIMD_WIDTH) ip_fback[hn::Lanes(f32)]; hn::Store(x_in, f32, ip_fback); @@ -177,7 +193,7 @@ simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, if ((simd_maski >> i) & 1) { continue; } - dst[sdst*i] = npy_cosf(ip_fback[i]); + dst[sdst * i] = npy_cosf(ip_fback[i]); } } else { @@ -185,85 +201,95 @@ simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, if ((simd_maski >> i) & 1) { continue; } - dst[sdst*i] = npy_sinf(ip_fback[i]); + dst[sdst * i] = npy_sinf(ip_fback[i]); } } } - npyv_cleanup(); + npyv_cleanup(); } } -#endif // NPY_SIMD_FMA3 +#endif // NPY_SIMD_FMA3 /* Disable SIMD code sin/cos f64 and revert to libm: see * https://mail.python.org/archives/list/numpy-discussion@python.org/thread/C6EYZZSR4EWGVKHAZXLE7IBILRMNVK7L/ * for detailed discussion on this*/ -#define DISPATCH_DOUBLE_FUNC(func) \ -NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(DOUBLE_##func) \ -(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) \ -{ \ - UNARY_LOOP { \ - const npy_double in1 = *(npy_double *)ip1; \ - *(npy_double *)op1 = npy_##func(in1); \ - } \ -} \ +#define DISPATCH_DOUBLE_FUNC(func) \ + NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(DOUBLE_##func)( \ + char **args, npy_intp const *dimensions, npy_intp const *steps, \ + void *NPY_UNUSED(data)) \ + { \ + UNARY_LOOP \ + { \ + const npy_double in1 = *(npy_double *)ip1; \ + *(npy_double *)op1 = npy_##func(in1); \ + } \ + } DISPATCH_DOUBLE_FUNC(sin) DISPATCH_DOUBLE_FUNC(cos) -NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(FLOAT_sin) -(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) +NPY_NO_EXPORT void +NPY_CPU_DISPATCH_CURFX(FLOAT_sin)(char **args, npy_intp const *dimensions, + npy_intp const *steps, + void *NPY_UNUSED(data)) { #if NPY_SIMD_FMA3 npy_intp len = dimensions[0]; if (is_mem_overlap(args[0], steps[0], args[1], steps[1], len) || !npyv_loadable_stride_f32(steps[0]) || - !npyv_storable_stride_f32(steps[1]) - ) { - UNARY_LOOP { - simd_sincos_f32( - (npy_float *)ip1, 1, (npy_float *)op1, 1, 1, SIMD_COMPUTE_SIN); + !npyv_storable_stride_f32(steps[1])) { + UNARY_LOOP + { + simd_sincos_f32((npy_float *)ip1, 1, (npy_float *)op1, 1, 1, + SIMD_COMPUTE_SIN); } - } else { - const npy_float *src = (npy_float*)args[0]; - npy_float *dst = (npy_float*)args[1]; + } + else { + const npy_float *src = (npy_float *)args[0]; + npy_float *dst = (npy_float *)args[1]; const npy_intp ssrc = steps[0] / sizeof(npy_float); const npy_intp sdst = steps[1] / sizeof(npy_float); simd_sincos_f32(src, ssrc, dst, sdst, len, SIMD_COMPUTE_SIN); } #else - UNARY_LOOP { + UNARY_LOOP + { const npy_float in1 = *(npy_float *)ip1; *(npy_float *)op1 = npy_sinf(in1); } #endif } -NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(FLOAT_cos) -(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) +NPY_NO_EXPORT void +NPY_CPU_DISPATCH_CURFX(FLOAT_cos)(char **args, npy_intp const *dimensions, + npy_intp const *steps, + void *NPY_UNUSED(data)) { #if NPY_SIMD_FMA3 npy_intp len = dimensions[0]; if (is_mem_overlap(args[0], steps[0], args[1], steps[1], len) || !npyv_loadable_stride_f32(steps[0]) || - !npyv_storable_stride_f32(steps[1]) - ) { - UNARY_LOOP { - simd_sincos_f32( - (npy_float *)ip1, 1, (npy_float *)op1, 1, 1, SIMD_COMPUTE_COS); + !npyv_storable_stride_f32(steps[1])) { + UNARY_LOOP + { + simd_sincos_f32((npy_float 
*)ip1, 1, (npy_float *)op1, 1, 1, + SIMD_COMPUTE_COS); } - } else { - const npy_float *src = (npy_float*)args[0]; - npy_float *dst = (npy_float*)args[1]; + } + else { + const npy_float *src = (npy_float *)args[0]; + npy_float *dst = (npy_float *)args[1]; const npy_intp ssrc = steps[0] / sizeof(npy_float); const npy_intp sdst = steps[1] / sizeof(npy_float); simd_sincos_f32(src, ssrc, dst, sdst, len, SIMD_COMPUTE_COS); } #else - UNARY_LOOP { + UNARY_LOOP + { const npy_float in1 = *(npy_float *)ip1; *(npy_float *)op1 = npy_cosf(in1); } diff --git a/numpy/_core/src/umath/matmul.c.src b/numpy/_core/src/umath/matmul.c.src index 37f990f970ed..f0f8b2f4153f 100644 --- a/numpy/_core/src/umath/matmul.c.src +++ b/numpy/_core/src/umath/matmul.c.src @@ -81,9 +81,9 @@ static const npy_cfloat oneF = 1.0f, zeroF = 0.0f; */ NPY_NO_EXPORT void @name@_gemv(void *ip1, npy_intp is1_m, npy_intp is1_n, - void *ip2, npy_intp is2_n, npy_intp NPY_UNUSED(is2_p), - void *op, npy_intp op_m, npy_intp NPY_UNUSED(op_p), - npy_intp m, npy_intp n, npy_intp NPY_UNUSED(p)) + void *ip2, npy_intp is2_n, + void *op, npy_intp op_m, + npy_intp m, npy_intp n) { /* * Vector matrix multiplication -- Level 2 BLAS @@ -465,13 +465,12 @@ NPY_NO_EXPORT void op, os_m, os_p, dm, dn, dp); } else if (vector_matrix) { /* vector @ matrix, switch ip1, ip2, p and m */ - @TYPE@_gemv(ip2, is2_p, is2_n, ip1, is1_n, is1_m, - op, os_p, os_m, dp, dn, dm); + @TYPE@_gemv(ip2, is2_p, is2_n, ip1, is1_n, + op, os_p, dp, dn); } else if (matrix_vector) { /* matrix @ vector */ - @TYPE@_gemv(ip1, is1_m, is1_n, ip2, is2_n, is2_p, - - op, os_m, os_p, dm, dn, dp); + @TYPE@_gemv(ip1, is1_m, is1_n, ip2, is2_n, + op, os_m, dm, dn); } else { /* column @ row, 2d output, no blas needed or non-blas-able input */ @TYPE@_matmul_inner_noblas(ip1, is1_m, is1_n, @@ -655,3 +654,174 @@ NPY_NO_EXPORT void } } /**end repeat**/ + +#if defined(HAVE_CBLAS) +/* + * Blas complex vector-matrix product via gemm (gemv cannot conjugate the vector). + */ +/**begin repeat + * + * #name = CFLOAT, CDOUBLE# + * #typ = npy_cfloat, npy_cdouble# + * #prefix = c, z# + * #step1 = &oneF, &oneD# + * #step0 = &zeroF, &zeroD# + */ +NPY_NO_EXPORT void +@name@_vecmat_via_gemm(void *ip1, npy_intp is1_n, + void *ip2, npy_intp is2_n, npy_intp is2_m, + void *op, npy_intp os_m, + npy_intp n, npy_intp m) +{ + enum CBLAS_ORDER order = CblasRowMajor; + enum CBLAS_TRANSPOSE trans1, trans2; + CBLAS_INT N, M, lda, ldb, ldc; + assert(n <= BLAS_MAXSIZE && m <= BLAS_MAXSIZE); + N = (CBLAS_INT)n; + M = (CBLAS_INT)m; + + assert(os_m == sizeof(@typ@)); + ldc = (CBLAS_INT)m; + + assert(is_blasable2d(is1_n, sizeof(@typ@), n, 1, sizeof(@typ@))); + trans1 = CblasConjTrans; + lda = (CBLAS_INT)(is1_n / sizeof(@typ@)); + + if (is_blasable2d(is2_n, is2_m, n, m, sizeof(@typ@))) { + trans2 = CblasNoTrans; + ldb = (CBLAS_INT)(is2_n / sizeof(@typ@)); + } + else { + assert(is_blasable2d(is2_m, is2_n, m, n, sizeof(@typ@))); + trans2 = CblasTrans; + ldb = (CBLAS_INT)(is2_m / sizeof(@typ@)); + } + CBLAS_FUNC(cblas_@prefix@gemm)( + order, trans1, trans2, 1, M, N, @step1@, ip1, lda, + ip2, ldb, @step0@, op, ldc); +} +/**end repeat**/ +#endif + +/* + * matvec loops, using blas gemv if possible, and TYPE_dot implementations otherwise. 
+ * signature is (m,n),(n)->(m) + */ +/**begin repeat + * #TYPE = FLOAT, DOUBLE, LONGDOUBLE, HALF, + * CFLOAT, CDOUBLE, CLONGDOUBLE, + * UBYTE, USHORT, UINT, ULONG, ULONGLONG, + * BYTE, SHORT, INT, LONG, LONGLONG, + * BOOL, OBJECT# + * #typ = npy_float,npy_double,npy_longdouble, npy_half, + * npy_cfloat, npy_cdouble, npy_clongdouble, + * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong, + * npy_byte, npy_short, npy_int, npy_long, npy_longlong, + * npy_bool, npy_object# + * #USEBLAS = 1, 1, 0, 0, 1, 1, 0*13# + * #CHECK_PYERR = 0*18, 1# + */ +NPY_NO_EXPORT void +@TYPE@_matvec(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) +{ + npy_intp n_outer = dimensions[0]; + npy_intp s0=steps[0], s1=steps[1], s2=steps[2]; + npy_intp dm = dimensions[1], dn = dimensions[2]; + npy_intp is1_m=steps[3], is1_n=steps[4], is2_n=steps[5], os_m=steps[6]; +#if @USEBLAS@ && defined(HAVE_CBLAS) + npy_bool too_big_for_blas = (dm > BLAS_MAXSIZE || dn > BLAS_MAXSIZE); + npy_bool i1_c_blasable = is_blasable2d(is1_m, is1_n, dm, dn, sizeof(@typ@)); + npy_bool i1_f_blasable = is_blasable2d(is1_n, is1_m, dn, dm, sizeof(@typ@)); + npy_bool i2_blasable = is_blasable2d(is2_n, sizeof(@typ@), dn, 1, sizeof(@typ@)); + npy_bool blasable = ((i1_c_blasable || i1_f_blasable) && i2_blasable + && !too_big_for_blas && dn > 1 && dm > 1); +#endif + for (npy_intp i = 0; i < n_outer; i++, + args[0] += s0, args[1] += s1, args[2] += s2) { + char *ip1=args[0], *ip2=args[1], *op=args[2]; +#if @USEBLAS@ && defined(HAVE_CBLAS) + if (blasable) { + @TYPE@_gemv(ip1, is1_m, is1_n, ip2, is2_n, op, os_m, dm, dn); + continue; + } +#endif + /* + * Dot the different matrix rows with the vector to get output elements. + * (no conjugation for complex, unlike vecdot and vecmat) + */ + for (npy_intp j = 0; j < dm; j++, ip1 += is1_m, op += os_m) { + @TYPE@_dot(ip1, is1_n, ip2, is2_n, op, dn, NULL); +#if @CHECK_PYERR@ + if (PyErr_Occurred()) { + return; + } +#endif + } + } +} +/**end repeat**/ + +/* + * vecmat loops, using blas gemv for float and gemm for complex if possible, + * and TYPE_dot[c] implementations otherwise. + * Note that we cannot use gemv for complex, since we need to conjugate the vector. 
+ * signature is (n),(n,m)->(m) + */ +/**begin repeat + * #TYPE = FLOAT, DOUBLE, LONGDOUBLE, HALF, + * CFLOAT, CDOUBLE, CLONGDOUBLE, + * UBYTE, USHORT, UINT, ULONG, ULONGLONG, + * BYTE, SHORT, INT, LONG, LONGLONG, + * BOOL, OBJECT# + * #typ = npy_float,npy_double,npy_longdouble, npy_half, + * npy_cfloat, npy_cdouble, npy_clongdouble, + * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong, + * npy_byte, npy_short, npy_int, npy_long, npy_longlong, + * npy_bool, npy_object# + * #USEBLAS = 1, 1, 0, 0, 1, 1, 0*13# + * #COMPLEX = 0*4, 1*3, 0*11, 1# + * #DOT = dot*4, dotc*3, dot*11, dotc# + * #CHECK_PYERR = 0*18, 1# + */ +NPY_NO_EXPORT void +@TYPE@_vecmat(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) +{ + npy_intp n_outer = dimensions[0]; + npy_intp s0=steps[0], s1=steps[1], s2=steps[2]; + npy_intp dn = dimensions[1], dm = dimensions[2]; + npy_intp is1_n=steps[3], is2_n=steps[4], is2_m=steps[5], os_m=steps[6]; +#if @USEBLAS@ && defined(HAVE_CBLAS) + npy_bool too_big_for_blas = (dm > BLAS_MAXSIZE || dn > BLAS_MAXSIZE); + npy_bool i1_blasable = is_blasable2d(is1_n, sizeof(@typ@), dn, 1, sizeof(@typ@)); + npy_bool i2_c_blasable = is_blasable2d(is2_n, is2_m, dn, dm, sizeof(@typ@)); + npy_bool i2_f_blasable = is_blasable2d(is2_m, is2_n, dm, dn, sizeof(@typ@)); + npy_bool blasable = (i1_blasable && (i2_c_blasable || i2_f_blasable) + && !too_big_for_blas && dn > 1 && dm > 1); +#endif + for (npy_intp i = 0; i < n_outer; i++, + args[0] += s0, args[1] += s1, args[2] += s2) { + char *ip1=args[0], *ip2=args[1], *op=args[2]; +#if @USEBLAS@ && defined(HAVE_CBLAS) + if (blasable) { +#if @COMPLEX@ + /* For complex, use gemm so we can conjugate the vector */ + @TYPE@_vecmat_via_gemm(ip1, is1_n, ip2, is2_n, is2_m, op, os_m, dn, dm); +#else + /* For float, use gemv (hence flipped order) */ + @TYPE@_gemv(ip2, is2_m, is2_n, ip1, is1_n, op, os_m, dm, dn); +#endif + continue; + } +#endif + /* Dot the vector with different matrix columns to get output elements. 
*/ + for (npy_intp j = 0; j < dm; j++, ip2 += is2_m, op += os_m) { + @TYPE@_@DOT@(ip1, is1_n, ip2, is2_n, op, dn, NULL); +#if @CHECK_PYERR@ + if (PyErr_Occurred()) { + return; + } +#endif + } + } +} +/**end repeat**/ diff --git a/numpy/_core/src/umath/matmul.h.src b/numpy/_core/src/umath/matmul.h.src index df3f549a545a..bff3d73c8993 100644 --- a/numpy/_core/src/umath/matmul.h.src +++ b/numpy/_core/src/umath/matmul.h.src @@ -7,15 +7,10 @@ **/ NPY_NO_EXPORT void @TYPE@_matmul(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); -/**end repeat**/ - -/**begin repeat - * #TYPE = FLOAT, DOUBLE, LONGDOUBLE, HALF, - * CFLOAT, CDOUBLE, CLONGDOUBLE, - * UBYTE, USHORT, UINT, ULONG, ULONGLONG, - * BYTE, SHORT, INT, LONG, LONGLONG, - * BOOL, OBJECT# - */ NPY_NO_EXPORT void @TYPE@_vecdot(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +@TYPE@_matvec(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +@TYPE@_vecmat(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); /**end repeat**/ diff --git a/numpy/_core/src/umath/reduction.c b/numpy/_core/src/umath/reduction.c index 548530e1ca3b..1d3937eee1eb 100644 --- a/numpy/_core/src/umath/reduction.c +++ b/numpy/_core/src/umath/reduction.c @@ -218,10 +218,13 @@ PyUFunc_ReduceWrapper(PyArrayMethod_Context *context, NPY_ITER_ZEROSIZE_OK | NPY_ITER_REFS_OK | NPY_ITER_DELAY_BUFALLOC | + /* + * stride negation (if reorderable) could currently misalign the + * first-visit and initial value copy logic. + */ + NPY_ITER_DONT_NEGATE_STRIDES | NPY_ITER_COPY_IF_OVERLAP; - if (!(context->method->flags & NPY_METH_IS_REORDERABLE)) { - it_flags |= NPY_ITER_DONT_NEGATE_STRIDES; - } + op_flags[0] = NPY_ITER_READWRITE | NPY_ITER_ALIGNED | NPY_ITER_ALLOCATE | diff --git a/numpy/_core/src/umath/scalarmath.c.src b/numpy/_core/src/umath/scalarmath.c.src index cd28e4405b6d..a565eee8f939 100644 --- a/numpy/_core/src/umath/scalarmath.c.src +++ b/numpy/_core/src/umath/scalarmath.c.src @@ -937,7 +937,7 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring) if (PyArray_IsScalar(value, @Name@)) { *result = PyArrayScalar_VAL(value, @Name@); /* - * In principle special, assyemetric, handling could be possible for + * In principle special, asymmetric, handling could be possible for * explicit subclasses. * In practice, we just check the normal deferring logic. */ @@ -956,10 +956,6 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring) if (PyFloat_CheckExact(value)) { if (!IS_SAFE(NPY_DOUBLE, NPY_@TYPE@)) { - if (get_npy_promotion_state() != NPY_USE_WEAK_PROMOTION) { - /* Legacy promotion and weak-and-warn not handled here */ - return PROMOTION_REQUIRED; - } /* Weak promotion is used when self is float or complex: */ if (!PyTypeNum_ISFLOAT(NPY_@TYPE@) && !PyTypeNum_ISCOMPLEX(NPY_@TYPE@)) { return PROMOTION_REQUIRED; @@ -976,19 +972,12 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring) * long -> (c)longdouble is safe, so `OTHER_IS_UNKNOWN_OBJECT` will * be returned below for huge integers. 
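The new gufunc loops back the `matvec` and `vecmat` operations; a usage sketch, assuming the Python-level `np.matvec`/`np.vecmat` wrappers that accompany this change:

    import numpy as np

    A = np.arange(6.0).reshape(2, 3)
    v = np.arange(3.0)

    # matvec: signature (m,n),(n)->(m)
    print(np.matvec(A, v))    # same result as A @ v

    # vecmat: signature (n),(n,m)->(m); the vector is conjugated for complex
    # input, which is why the complex loops go through gemm instead of gemv:
    w = np.array([1 + 2j, 3 - 1j])
    B = np.eye(2, dtype=complex)
    print(np.vecmat(w, B))    # same result as w.conj() @ B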
*/ - if (get_npy_promotion_state() != NPY_USE_WEAK_PROMOTION) { - /* Legacy promotion and weak-and-warn not handled here */ - return PROMOTION_REQUIRED; - } return CONVERT_PYSCALAR; } int overflow; long val = PyLong_AsLongAndOverflow(value, &overflow); if (overflow) { /* handle as if "unsafe" */ - if (get_npy_promotion_state() != NPY_USE_WEAK_PROMOTION) { - return OTHER_IS_UNKNOWN_OBJECT; - } return CONVERT_PYSCALAR; } if (error_converting(val)) { @@ -1000,10 +989,6 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring) if (PyComplex_CheckExact(value)) { if (!IS_SAFE(NPY_CDOUBLE, NPY_@TYPE@)) { - if (get_npy_promotion_state() != NPY_USE_WEAK_PROMOTION) { - /* Legacy promotion and weak-and-warn not handled here */ - return PROMOTION_REQUIRED; - } /* Weak promotion is used when self is float or complex: */ if (!PyTypeNum_ISCOMPLEX(NPY_@TYPE@)) { return PROMOTION_REQUIRED; @@ -1369,7 +1354,7 @@ static PyObject * */ PyObject *ret; npy_float64 arg1, arg2, other_val; - @type@ other_val_conv; + @type@ other_val_conv = 0; int is_forward; if (Py_TYPE(a) == &Py@Name@ArrType_Type) { diff --git a/numpy/_core/src/umath/string_buffer.h b/numpy/_core/src/umath/string_buffer.h index 665c47bbf067..ae89ede46ddc 100644 --- a/numpy/_core/src/umath/string_buffer.h +++ b/numpy/_core/src/umath/string_buffer.h @@ -866,7 +866,7 @@ string_find(Buffer buf1, Buffer buf2, npy_int64 start, npy_int64 end) { char ch = *buf2; CheckedIndexer ind(start_loc, end_loc - start_loc); - result = (npy_intp) findchar(ind, end_loc - start_loc, ch); + result = (npy_intp) find_char(ind, end_loc - start_loc, ch); if (enc == ENCODING::UTF8 && result > 0) { result = utf8_character_index( start_loc, start_loc - buf1.buf, start, result, @@ -878,7 +878,7 @@ string_find(Buffer buf1, Buffer buf2, npy_int64 start, npy_int64 end) { npy_ucs4 ch = *buf2; CheckedIndexer ind((npy_ucs4 *)(buf1 + start).buf, end-start); - result = (npy_intp) findchar(ind, end - start, ch); + result = (npy_intp) find_char(ind, end - start, ch); break; } } @@ -970,7 +970,7 @@ string_rfind(Buffer buf1, Buffer buf2, npy_int64 start, npy_int64 end) { char ch = *buf2; CheckedIndexer ind(start_loc, end_loc - start_loc); - result = (npy_intp) rfindchar(ind, end_loc - start_loc, ch); + result = (npy_intp) rfind_char(ind, end_loc - start_loc, ch); if (enc == ENCODING::UTF8 && result > 0) { result = utf8_character_index( start_loc, start_loc - buf1.buf, start, result, @@ -982,7 +982,7 @@ string_rfind(Buffer buf1, Buffer buf2, npy_int64 start, npy_int64 end) { npy_ucs4 ch = *buf2; CheckedIndexer ind((npy_ucs4 *)(buf1 + start).buf, end - start); - result = (npy_intp) rfindchar(ind, end - start, ch); + result = (npy_intp) rfind_char(ind, end - start, ch); break; } } @@ -1236,14 +1236,14 @@ string_lrstrip_chars(Buffer buf1, Buffer buf2, Buffer out, STRIPT case ENCODING::ASCII: { CheckedIndexer ind(buf2.buf, len2); - res = findchar(ind, len2, *traverse_buf); + res = find_char(ind, len2, *traverse_buf); break; } case ENCODING::UTF8: { if (current_point_bytes == 1) { CheckedIndexer ind(buf2.buf, len2); - res = findchar(ind, len2, *traverse_buf); + res = find_char(ind, len2, *traverse_buf); } else { res = fastsearch(buf2.buf, buf2.after - buf2.buf,traverse_buf.buf, current_point_bytes, -1, FAST_SEARCH); } @@ -1252,7 +1252,7 @@ string_lrstrip_chars(Buffer buf1, Buffer buf2, Buffer out, STRIPT case ENCODING::UTF32: { CheckedIndexer ind((npy_ucs4 *)buf2.buf, len2); - res = findchar(ind, len2, *traverse_buf); + res = find_char(ind, len2, *traverse_buf); break; 
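With the legacy promotion branches removed from `convert_to_@name@`, Python scalars are always treated as weak per NEP 50; a behavior sketch:

    import numpy as np

    # The Python float adopts the float32 dtype instead of upcasting:
    print((np.float32(3.0) + 3.0).dtype)   # float32

    # A Python int that cannot fit the dtype raises rather than upcasting:
    try:
        np.uint8(1) + 300
    except OverflowError as exc:
        print(exc)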
} } @@ -1280,14 +1280,14 @@ string_lrstrip_chars(Buffer buf1, Buffer buf2, Buffer out, STRIPT case ENCODING::ASCII: { CheckedIndexer ind(buf2.buf, len2); - res = findchar(ind, len2, *traverse_buf); + res = find_char(ind, len2, *traverse_buf); break; } case ENCODING::UTF8: { if (current_point_bytes == 1) { CheckedIndexer ind(buf2.buf, len2); - res = findchar(ind, len2, *traverse_buf); + res = find_char(ind, len2, *traverse_buf); } else { res = fastsearch(buf2.buf, buf2.after - buf2.buf, traverse_buf.buf, current_point_bytes, -1, FAST_RSEARCH); } @@ -1296,7 +1296,7 @@ string_lrstrip_chars(Buffer buf1, Buffer buf2, Buffer out, STRIPT case ENCODING::UTF32: { CheckedIndexer ind((npy_ucs4 *)buf2.buf, len2); - res = findchar(ind, len2, *traverse_buf); + res = find_char(ind, len2, *traverse_buf); break; } } @@ -1331,7 +1331,7 @@ findslice_for_replace(CheckedIndexer buf1, npy_intp len1, return 0; } if (len2 == 1) { - return (npy_intp) findchar(buf1, len1, *buf2); + return (npy_intp) find_char(buf1, len1, *buf2); } return (npy_intp) fastsearch(buf1.buffer, len1, buf2.buffer, len2, -1, FAST_SEARCH); } diff --git a/numpy/_core/src/umath/string_fastsearch.h b/numpy/_core/src/umath/string_fastsearch.h index 61abdcb5ad19..54092d8b293d 100644 --- a/numpy/_core/src/umath/string_fastsearch.h +++ b/numpy/_core/src/umath/string_fastsearch.h @@ -9,6 +9,7 @@ #include #include +#include #include @@ -28,13 +29,37 @@ algorithm, which has worst-case O(n) runtime and best-case O(n/k). Also compute a table of shifts to achieve O(n/k) in more cases, and often (data dependent) deduce larger shifts than pure C&P can - deduce. See stringlib_find_two_way_notes.txt in this folder for a - detailed explanation. */ + deduce. See https://github.com/python/cpython/blob/main/Objects/stringlib/stringlib_find_two_way_notes.txt + in the CPython repository for a detailed explanation.*/ +/** + * @internal + * @brief Mode for counting the number of occurrences of a substring + */ #define FAST_COUNT 0 + +/** + * @internal + * @brief Mode for performing a forward search for a substring + */ #define FAST_SEARCH 1 + +/** + * @internal + * @brief Mode for performing a reverse (backward) search for a substring + */ #define FAST_RSEARCH 2 +/** + * @file_internal + * @brief Defines the bloom filter width based on the size of LONG_BIT. + * + * This macro sets the value of `STRINGLIB_BLOOM_WIDTH` depending on the + * size of the system's LONG_BIT. It ensures that the bloom filter + * width is at least 32 bits. + * + * @error If LONG_BIT is smaller than 32, a compilation error will occur. + */ #if LONG_BIT >= 128 #define STRINGLIB_BLOOM_WIDTH 128 #elif LONG_BIT >= 64 @@ -45,39 +70,98 @@ #error "LONG_BIT is smaller than 32" #endif +/** + * @file_internal + * @brief Adds a character to the bloom filter mask. + * + * This macro sets the bit in the bloom filter `mask` corresponding to the + * character `ch`. It uses the `STRINGLIB_BLOOM_WIDTH` to ensure the bit is + * within range. + * + * @param mask The bloom filter mask where the character will be added. + * @param ch The character to add to the bloom filter mask. + */ #define STRINGLIB_BLOOM_ADD(mask, ch) \ ((mask |= (1UL << ((ch) & (STRINGLIB_BLOOM_WIDTH -1))))) + +/** + * @file_internal + * @brief Checks if a character is present in the bloom filter mask. + * + * This macro checks if the bit corresponding to the character `ch` is set + * in the bloom filter `mask`. + * + * @param mask The bloom filter mask to check. + * @param ch The character to check in the bloom filter mask. 
+ * @return 1 if the character is present, 0 otherwise. + */ #define STRINGLIB_BLOOM(mask, ch) \ ((mask & (1UL << ((ch) & (STRINGLIB_BLOOM_WIDTH -1))))) -#define FORWARD_DIRECTION 1 -#define BACKWARD_DIRECTION -1 +/** + * @file_internal + * @brief Threshold for using memchr or wmemchr in character search. + * + * If the search length exceeds this value, memchr/wmemchr is used. + */ #define MEMCHR_CUT_OFF 15 +/** + * @internal + * @brief A checked indexer for buffers of a specified character type. + * + * This structure provides safe indexing into a buffer with boundary checks. + * + * @internal + * + * @tparam char_type The type of characters stored in the buffer. + */ template struct CheckedIndexer { - char_type *buffer; - size_t length; + char_type *buffer; ///< Pointer to the buffer. + size_t length; ///< Length of the buffer. + /** + * @brief Default constructor that initializes the buffer to NULL and length to 0. + */ CheckedIndexer() { buffer = NULL; length = 0; } + /** + * @brief Constructor that initializes the indexer with a given buffer and length. + * + * @param buf Pointer to the character buffer. + * @param len Length of the buffer. + */ CheckedIndexer(char_type *buf, size_t len) { buffer = buf; length = len; } + /** + * @brief Dereference operator that returns the first character in the buffer. + * + * @return The first character in the buffer. + */ char_type operator*() { return *(this->buffer); } + /** + * @brief Subscript operator for safe indexing into the buffer. + * + * If the index is out of bounds, it returns 0. + * + * @param index Index to access in the buffer. + * @return The character at the specified index or 0 if out of bounds. + */ char_type operator[](size_t index) { @@ -87,6 +171,15 @@ struct CheckedIndexer { return this->buffer[index]; } + /** + * @brief Addition operator to move the indexer forward by a specified number of elements. + * + * @param rhs Number of elements to move forward. + * @return A new CheckedIndexer instance with updated buffer and length. + * + * @note If the specified number of elements to move exceeds the length of the buffer, + * the indexer will be moved to the end of the buffer, and the length will be set to 0. + */ CheckedIndexer operator+(size_t rhs) { @@ -96,6 +189,15 @@ struct CheckedIndexer { return CheckedIndexer(this->buffer + rhs, this->length - rhs); } + /** + * @brief Addition assignment operator to move the indexer forward. + * + * @param rhs Number of elements to move forward. + * @return Reference to the current CheckedIndexer instance. + * + * @note If the specified number of elements to move exceeds the length of the buffer, + * the indexer will be moved to the end of the buffer, and the length will be set to 0. + */ CheckedIndexer& operator+=(size_t rhs) { @@ -107,6 +209,13 @@ struct CheckedIndexer { return *this; } + /** + * @brief Postfix increment operator. + * + * @return A CheckedIndexer instance before incrementing. + * + * @note If the indexer is at the end of the buffer, this operation has no effect. + */ CheckedIndexer operator++(int) { @@ -114,6 +223,14 @@ struct CheckedIndexer { return *this; } + /** + * @brief Subtraction assignment operator to move the indexer backward. + * + * @param rhs Number of elements to move backward. + * @return Reference to the current CheckedIndexer instance. + * + * @note If the indexer moves backward past the start of the buffer, the behavior is undefined. 
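To make the contract in these comments concrete, here is a minimal Python model of the indexer's bounds rules, assuming only what the documentation above states (the class name is mine; backward moves past the start are omitted because the comments declare them undefined):

    class CheckedIndexerModel:
        # Illustrative sketch of CheckedIndexer (the real type is a C++ template).
        def __init__(self, buf, length):
            self.buf = buf
            self.length = length

        def __getitem__(self, index):
            # Out-of-bounds subscripts read as 0 instead of being undefined.
            if index >= self.length:
                return 0
            return self.buf[index]

        def advance(self, rhs):
            # operator+ / operator+=: overshooting clamps to the buffer end.
            rhs = min(rhs, self.length)
            return CheckedIndexerModel(self.buf[rhs:], self.length - rhs)

    ind = CheckedIndexerModel(b"abc", 3)
    print(ind[1])                   # 98, i.e. ord('b')
    print(ind[10])                  # 0: checked, not an out-of-bounds read
    print(ind.advance(99).length)   # 0: clamped to the end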
+ */ CheckedIndexer& operator-=(size_t rhs) { @@ -122,6 +239,13 @@ struct CheckedIndexer { return *this; } + /** + * @brief Postfix decrement operator. + * + * @return A CheckedIndexer instance before decrementing. + * + * @note If the indexer moves backward past the start of the buffer, the behavior is undefined. + */ CheckedIndexer operator--(int) { @@ -129,42 +253,86 @@ struct CheckedIndexer { return *this; } + /** + * @brief Subtraction operator to calculate the difference between two indexers. + * + * @param rhs Another CheckedIndexer instance to compare. + * @return The difference in pointers between the two indexers. + */ std::ptrdiff_t operator-(CheckedIndexer rhs) { return this->buffer - rhs.buffer; } + /** + * @brief Subtraction operator to move the indexer backward by a specified number of elements. + * + * @param rhs Number of elements to move backward. + * @return A new CheckedIndexer instance with updated buffer and length. + * + * @note If the indexer moves backward past the start of the buffer, the behavior is undefined. + */ CheckedIndexer operator-(size_t rhs) { return CheckedIndexer(this->buffer - rhs, this->length + rhs); } + /** + * @brief Greater-than comparison operator. + * + * @param rhs Another CheckedIndexer instance to compare. + * @return True if this indexer is greater than the right-hand side, otherwise false. + */ int operator>(CheckedIndexer rhs) { return this->buffer > rhs.buffer; } + /** + * @brief Greater-than-or-equal comparison operator. + * + * @param rhs Another CheckedIndexer instance to compare. + * @return True if this indexer is greater than or equal to the right-hand side, otherwise false. + */ int operator>=(CheckedIndexer rhs) { return this->buffer >= rhs.buffer; } + /** + * @brief Less-than comparison operator. + * + * @param rhs Another CheckedIndexer instance to compare. + * @return True if this indexer is less than the right-hand side, otherwise false. + */ int operator<(CheckedIndexer rhs) { return this->buffer < rhs.buffer; } + /** + * @brief Less-than-or-equal comparison operator. + * + * @param rhs Another CheckedIndexer instance to compare. + * @return True if this indexer is less than or equal to the right-hand side, otherwise false. + */ int operator<=(CheckedIndexer rhs) { return this->buffer <= rhs.buffer; } + /** + * @brief Equality comparison operator. + * + * @param rhs Another CheckedIndexer instance to compare. + * @return True if both indexers point to the same buffer, otherwise false. + */ int operator==(CheckedIndexer rhs) { @@ -173,9 +341,27 @@ struct CheckedIndexer { }; +/** + * @internal + * @brief Finds the first occurrence of a specified character in a + * given range of a buffer. + * + * This function searches for the character `ch` in the buffer represented + * by the `CheckedIndexer`. It uses different methods depending on the size + * of the range `n`. If `n` exceeds the `MEMCHR_CUT_OFF`, it utilizes + * `memchr` or `wmemchr` for more efficient searching. + * + * @tparam char_type The type of characters in the buffer. + * @param s The `CheckedIndexer` instance representing the buffer to + * search within. + * @param n The number of characters to search through in the buffer. + * @param ch The character to search for. + * @return The index of the first occurrence of `ch` within the range, + * or -1 if the character is not found or the range is invalid. 
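The renamed find_char/rfind_char below reduce to a linear scan; as a reference for the documented semantics, a pure-Python model (the C++ versions additionally dispatch to memchr/wmemchr once the range exceeds the MEMCHR_CUT_OFF of 15):

    def find_char(buf, n, ch):
        # Index of the first occurrence of ch in buf[:n], or -1.
        for i in range(n):
            if buf[i] == ch:
                return i
        return -1

    def rfind_char(buf, n, ch):
        # Index of the last occurrence of ch in buf[:n], or -1.
        for i in range(n - 1, -1, -1):
            if buf[i] == ch:
                return i
        return -1

    assert find_char("abcabc", 6, "b") == 1
    assert rfind_char("abcabc", 6, "b") == 4
    assert find_char("abcabc", 6, "z") == -1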
+ */ template inline Py_ssize_t -findchar(CheckedIndexer s, Py_ssize_t n, char_type ch) +find_char(CheckedIndexer s, Py_ssize_t n, char_type ch) { char_type *p = s.buffer, *e = (s + n).buffer; @@ -208,9 +394,27 @@ findchar(CheckedIndexer s, Py_ssize_t n, char_type ch) return -1; } +/** + * @internal + * @brief Finds the last occurrence of a specified character in a + * given range of a buffer. + * + * This function searches for the character `ch` in the buffer represented + * by the `CheckedIndexer`. It scans the buffer from the end towards the + * beginning, returning the index of the last occurrence of the specified + * character. + * + * @tparam char_type The type of characters in the buffer. + * @param s The `CheckedIndexer` instance representing the buffer to + * search within. + * @param n The number of characters to search through in the buffer. + * @param ch The character to search for. + * @return The index of the last occurrence of `ch` within the range, + * or -1 if the character is not found or the range is invalid. + */ template inline Py_ssize_t -rfindchar(CheckedIndexer s, Py_ssize_t n, char_type ch) +rfind_char(CheckedIndexer s, Py_ssize_t n, char_type ch) { CheckedIndexer p = s + n; while (p > s) { @@ -221,35 +425,67 @@ rfindchar(CheckedIndexer s, Py_ssize_t n, char_type ch) return -1; } - -/* Change to a 1 to see logging comments walk through the algorithm. */ +#undef MEMCHR_CUT_OFF + +/** + * @file_internal + * @brief Conditional logging for string fast search. + * + * Set to 1 to enable logging macros. + * + * @note These macros are used internally for debugging purposes + * and will be undefined later in the code. + */ #if 0 && STRINGLIB_SIZEOF_CHAR == 1 -# define LOG(...) printf(__VA_ARGS__) -# define LOG_STRING(s, n) printf("\"%.*s\"", (int)(n), s) -# define LOG_LINEUP() do { \ +/** Logs formatted output. */ +#define LOG(...) printf(__VA_ARGS__) + +/** Logs a string with a given length. */ +#define LOG_STRING(s, n) printf("\"%.*s\"", (int)(n), s) + +/** Logs the current state of the algorithm. */ +#define LOG_LINEUP() do { \ LOG("> "); LOG_STRING(haystack, len_haystack); LOG("\n> "); \ LOG("%*s",(int)(window_last - haystack + 1 - len_needle), ""); \ LOG_STRING(needle, len_needle); LOG("\n"); \ } while(0) #else -# define LOG(...) -# define LOG_STRING(s, n) -# define LOG_LINEUP() +#define LOG(...) +#define LOG_STRING(s, n) +#define LOG_LINEUP() #endif +/** + * @file_internal + * @brief Perform a lexicographic search for the maximal suffix in + * a given string. + * + * This function searches through the `needle` string to find the + * maximal suffix, which is essentially the largest lexicographic suffix. + * Essentially this: + * - max(needle[i:] for i in range(len(needle)+1)) + * + * Additionally, it computes the period of the right half of the string. + * + * @param needle The string to search in. + * @param len_needle The length of the needle string. + * @param return_period Pointer to store the period of the found suffix. + * @param invert_alphabet Flag to invert the comparison logic. + * @return The index of the maximal suffix found in the needle string. + * + * @note If `invert_alphabet` is non-zero, character comparisons are reversed, + * treating smaller characters as larger. + * + */ template static inline Py_ssize_t -_lex_search(CheckedIndexer needle, Py_ssize_t len_needle, +lex_search(CheckedIndexer needle, Py_ssize_t len_needle, Py_ssize_t *return_period, int invert_alphabet) { - /* Do a lexicographic search. 
Essentially this: - >>> max(needle[i:] for i in range(len(needle)+1)) - Also find the period of the right half. */ - Py_ssize_t max_suffix = 0; - Py_ssize_t candidate = 1; - Py_ssize_t k = 0; - // The period of the right half. - Py_ssize_t period = 1; + Py_ssize_t max_suffix = 0; // Index of the current maximal suffix found. + Py_ssize_t candidate = 1; // Candidate index for potential maximal suffix. + Py_ssize_t k = 0; // Offset for comparing characters. + Py_ssize_t period = 1; // Period of the right half. while (candidate + k < len_needle) { // each loop increases candidate + k + max_suffix @@ -286,51 +522,54 @@ _lex_search(CheckedIndexer needle, Py_ssize_t len_needle, period = 1; } } + *return_period = period; return max_suffix; } +/** + * @file_internal + * @brief Perform a critical factorization on a string. + * + * This function splits the input string into two parts where the local + * period is maximal. + * + * The function divides the input string as follows: + * - needle = (left := needle[:cut]) + (right := needle[cut:]) + * + * The local period is the minimal length of a string `w` such that: + * - left ends with `w` or `w` ends with left. + * - right starts with `w` or `w` starts with right. + * + * According to the Critical Factorization Theorem, this maximal local + * period is the global period of the string. The algorithm finds the + * cut using lexicographical order and its reverse to compute the maximal + * period, as shown by Crochemore and Perrin (1991). + * + * Example: + * For the string "GCAGAGAG", the split position (cut) is at 2, resulting in: + * - left = "GC" + * - right = "AGAGAG" + * The period of the right half is 2, and the repeated substring + * pattern "AG" verifies that this is the correct factorization. + * + * @param needle The input string as a CheckedIndexer. + * @param len_needle Length of the input string. + * @param return_period Pointer to store the computed period of the right half. + * @return The cut position where the string is factorized. + */ template static inline Py_ssize_t -_factorize(CheckedIndexer needle, +factorize(CheckedIndexer needle, Py_ssize_t len_needle, Py_ssize_t *return_period) { - /* Do a "critical factorization", making it so that: - >>> needle = (left := needle[:cut]) + (right := needle[cut:]) - where the "local period" of the cut is maximal. - - The local period of the cut is the minimal length of a string w - such that (left endswith w or w endswith left) - and (right startswith w or w startswith left). - - The Critical Factorization Theorem says that this maximal local - period is the global period of the string. - - Crochemore and Perrin (1991) show that this cut can be computed - as the later of two cuts: one that gives a lexicographically - maximal right half, and one that gives the same with the - with respect to a reversed alphabet-ordering. - - This is what we want to happen: - >>> x = "GCAGAGAG" - >>> cut, period = factorize(x) - >>> x[:cut], (right := x[cut:]) - ('GC', 'AGAGAG') - >>> period # right half period - 2 - >>> right[period:] == right[:-period] - True - - This is how the local period lines up in the above example: - GC | AGAGAG - AGAGAGC = AGAGAGC - The length of this minimal repetition is 7, which is indeed the - period of the original string. 
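The Python one-liner quoted in the docstring is directly checkable; here it is as a naive reference for lex_search's return value (the period bookkeeping and the inverted-alphabet variant are deliberately left out):

    def max_suffix_index(needle):
        # max(needle[i:] for i in range(len(needle) + 1)), reported as an index.
        return max(range(len(needle) + 1), key=lambda i: needle[i:])

    print(max_suffix_index("GCAGAGAG"))   # 0: the whole string is maximal
    print(max_suffix_index("BANANA"))     # 2: "NANA"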
*/ - Py_ssize_t cut1, period1, cut2, period2, cut, period; - cut1 = _lex_search(needle, len_needle, &period1, 0); - cut2 = _lex_search(needle, len_needle, &period2, 1); + + // Perform lexicographical search to find the first cut (normal order) + cut1 = lex_search(needle, len_needle, &period1, 0); + // Perform lexicographical search to find the second cut (reversed alphabet order) + cut2 = lex_search(needle, len_needle, &period2, 1); // Take the later cut. if (cut1 > cut2) { @@ -351,42 +590,99 @@ _factorize(CheckedIndexer needle, } +/** + * @file_internal + * @brief Internal macro to define the shift type used in the table. + */ #define SHIFT_TYPE uint8_t + +/** + * @file_internal + * @brief Internal macro to define the maximum shift value. + */ #define MAX_SHIFT UINT8_MAX + +/** + * @file_internal + * @brief Internal macro to define the number of bits for the table size. + */ #define TABLE_SIZE_BITS 6u + +/** + * @file_internal + * @brief Internal macro to define the table size based on TABLE_SIZE_BITS. + */ #define TABLE_SIZE (1U << TABLE_SIZE_BITS) + +/** + * @file_internal + * @brief Internal macro to define the table mask used for bitwise operations. + */ #define TABLE_MASK (TABLE_SIZE - 1U) +/** + * @file_internal + * @brief Struct to store precomputed data for string search algorithms. + * + * This structure holds all the necessary precomputed values needed + * to perform efficient string search operations on the given `needle` string. + * + * @tparam char_type Type of the characters in the string. + */ template struct prework { - CheckedIndexer needle; - Py_ssize_t len_needle; - Py_ssize_t cut; - Py_ssize_t period; - Py_ssize_t gap; - int is_periodic; - SHIFT_TYPE table[TABLE_SIZE]; + CheckedIndexer needle; ///< Indexer for the needle (substring). + Py_ssize_t len_needle; ///< Length of the needle. + Py_ssize_t cut; ///< Critical factorization cut point. + Py_ssize_t period; ///< Period of the right half of the needle. + Py_ssize_t gap; ///< Gap value for skipping during search. + int is_periodic; ///< Non-zero if the needle is periodic. + SHIFT_TYPE table[TABLE_SIZE]; ///< Shift table for optimizing search. }; +/** + * @file_internal + * @brief Preprocesses the needle (substring) for optimized string search. + * + * This function performs preprocessing on the given needle (substring) + * to prepare auxiliary data that will be used to optimize the string + * search algorithm. The preprocessing involves factorization of the + * substring, periodicity detection, gap computation, and the generation + * of a Boyer-Moore "Bad Character" shift table. + * + * @tparam char_type The character type of the string. + * @param needle The substring to be searched. + * @param len_needle The length of the substring. + * @param p A pointer to the search_prep_data structure where the preprocessing + * results will be stored. + */ template static void -_preprocess(CheckedIndexer needle, Py_ssize_t len_needle, +preprocess(CheckedIndexer needle, Py_ssize_t len_needle, prework *p) { + // Store the needle and its length, find the cut point and period. p->needle = needle; p->len_needle = len_needle; - p->cut = _factorize(needle, len_needle, &(p->period)); + p->cut = factorize(needle, len_needle, &(p->period)); assert(p->period + p->cut <= len_needle); + + // Compare parts of the needle to check for periodicity. 
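The memcmp that follows asks whether the length-cut prefix reappears at offset period. Restated in Python, with cut and period taken from the GCAGAGAG example in the factorize docstring above:

    needle = "GCAGAGAG"
    cut, period = 2, 2   # the example's factorization values

    # The periodicity test the memcmp performs:
    print(needle[:cut] == needle[period:period + cut])   # False -> "gap" path

    # The docstring's claim about the right half:
    right = needle[cut:]                                  # "AGAGAG"
    print(right[period:] == right[:-period])              # True: period is 2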
int cmp; if (std::is_same::value) { - cmp = memcmp(needle.buffer, needle.buffer + (p->period * sizeof(npy_ucs4)), (size_t) p->cut); + cmp = memcmp(needle.buffer, + needle.buffer + (p->period * sizeof(npy_ucs4)), + (size_t) p->cut); } else { - cmp = memcmp(needle.buffer, needle.buffer + p->period, (size_t) p->cut); + cmp = memcmp(needle.buffer, needle.buffer + p->period, + (size_t) p->cut); } p->is_periodic = (0 == cmp); + + // If periodic, gap is unused; otherwise, calculate period and gap. if (p->is_periodic) { assert(p->cut <= len_needle/2); assert(p->cut < p->period); @@ -407,6 +703,7 @@ _preprocess(CheckedIndexer needle, Py_ssize_t len_needle, } } } + // Fill up a compressed Boyer-Moore "Bad Character" table Py_ssize_t not_found_shift = Py_MIN(len_needle, MAX_SHIFT); for (Py_ssize_t i = 0; i < (Py_ssize_t)TABLE_SIZE; i++) { @@ -420,13 +717,36 @@ _preprocess(CheckedIndexer needle, Py_ssize_t len_needle, } } +/** + * @file_internal + * @brief Searches for a needle (substring) within a haystack (string) + * using the Two-Way string matching algorithm. + * + * This function efficiently searches for a needle within a haystack using + * preprocessed data. It handles both periodic and non-periodic needles + * and optimizes the search process with a bad character shift table. The + * function iterates through the haystack in windows, skipping over sections + * that do not match, improving performance and reducing comparisons. + * + * For more details, refer to the following resources: + * - Crochemore and Perrin's (1991) Two-Way algorithm: + * [Two-Way Algorithm](http://www-igm.univ-mlv.fr/~lecroq/string/node26.html#SECTION00260). + * + * @tparam char_type The type of the characters in the needle and haystack + * (e.g., npy_ucs4). + * @param haystack The string to search within, wrapped in CheckedIndexer. + * @param len_haystack The length of the haystack. + * @param p A pointer to the search_prep_data structure containing + * preprocessed data for the needle. + * @return The starting index of the first occurrence of the needle + * within the haystack, or -1 if the needle is not found. + */ template static Py_ssize_t -_two_way(CheckedIndexer haystack, Py_ssize_t len_haystack, +two_way(CheckedIndexer haystack, Py_ssize_t len_haystack, prework *p) { - // Crochemore and Perrin's (1991) Two-Way algorithm. - // See http://www-igm.univ-mlv.fr/~lecroq/string/node26.html#SECTION00260 + // Initialize key variables for search. const Py_ssize_t len_needle = p->len_needle; const Py_ssize_t cut = p->cut; Py_ssize_t period = p->period; @@ -438,10 +758,13 @@ _two_way(CheckedIndexer haystack, Py_ssize_t len_haystack, LOG("===== Two-way: \"%s\" in \"%s\". =====\n", needle, haystack); if (p->is_periodic) { + // Handle the case where the needle is periodic. + // Memory optimization is used to skip over already checked segments. LOG("Needle is periodic.\n"); Py_ssize_t memory = 0; periodicwindowloop: while (window_last < haystack_end) { + // Bad-character shift loop to skip parts of the haystack. assert(memory == 0); for (;;) { LOG_LINEUP(); @@ -459,6 +782,7 @@ _two_way(CheckedIndexer haystack, Py_ssize_t len_haystack, window = window_last - len_needle + 1; assert((window[len_needle - 1] & TABLE_MASK) == (needle[len_needle - 1] & TABLE_MASK)); + // Check if the right half of the pattern matches the haystack. 
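The compressed 64-entry "Bad Character" table filled at the end of preprocess() is indexed by the low TABLE_SIZE_BITS bits of a character, and the search loops just read table[window_last[0] & TABLE_MASK] to decide how far the window may jump. A Python sketch of building and using it, following the CPython fastsearch scheme this header derives from:

    TABLE_SIZE_BITS = 6
    TABLE_SIZE = 1 << TABLE_SIZE_BITS    # 64
    TABLE_MASK = TABLE_SIZE - 1
    MAX_SHIFT = 255                      # SHIFT_TYPE is uint8_t

    def build_table(needle):
        not_found_shift = min(len(needle), MAX_SHIFT)
        table = [not_found_shift] * TABLE_SIZE
        for i in range(len(needle) - not_found_shift, len(needle)):
            shift = min(not_found_shift, len(needle) - 1 - i)
            table[ord(needle[i]) & TABLE_MASK] = shift
        return table

    table = build_table("AGAGAG")
    print(table[ord("X") & TABLE_MASK])  # 6: char absent, skip a full window
    print(table[ord("G") & TABLE_MASK])  # 0: could end a match, inspect here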
Py_ssize_t i = Py_MAX(cut, memory); for (; i < len_needle; i++) { if (needle[i] != window[i]) { @@ -468,6 +792,7 @@ _two_way(CheckedIndexer haystack, Py_ssize_t len_haystack, goto periodicwindowloop; } } + // Check if the left half of the pattern matches the haystack. for (i = memory; i < cut; i++) { if (needle[i] != window[i]) { LOG("Left half does not match.\n"); @@ -476,6 +801,7 @@ _two_way(CheckedIndexer haystack, Py_ssize_t len_haystack, if (window_last >= haystack_end) { return -1; } + // Apply memory adjustments and shifts if mismatches occur. Py_ssize_t shift = table[window_last[0] & TABLE_MASK]; if (shift) { // A mismatch has been identified to the right @@ -496,12 +822,15 @@ _two_way(CheckedIndexer haystack, Py_ssize_t len_haystack, } } else { + // Handle the case where the needle is non-periodic. + // General shift logic based on a gap is used to improve performance. Py_ssize_t gap = p->gap; period = Py_MAX(gap, period); LOG("Needle is not periodic.\n"); Py_ssize_t gap_jump_end = Py_MIN(len_needle, cut + gap); windowloop: while (window_last < haystack_end) { + // Bad-character shift loop for non-periodic patterns. for (;;) { LOG_LINEUP(); Py_ssize_t shift = table[window_last[0] & TABLE_MASK]; @@ -517,6 +846,7 @@ _two_way(CheckedIndexer haystack, Py_ssize_t len_haystack, window = window_last - len_needle + 1; assert((window[len_needle - 1] & TABLE_MASK) == (needle[len_needle - 1] & TABLE_MASK)); + // Check the right half of the pattern for a match. for (Py_ssize_t i = cut; i < gap_jump_end; i++) { if (needle[i] != window[i]) { LOG("Early right half mismatch: jump by gap.\n"); @@ -525,6 +855,7 @@ _two_way(CheckedIndexer haystack, Py_ssize_t len_haystack, goto windowloop; } } + // Continue checking the remaining right half of the pattern. for (Py_ssize_t i = gap_jump_end; i < len_needle; i++) { if (needle[i] != window[i]) { LOG("Late right half mismatch.\n"); @@ -533,6 +864,7 @@ _two_way(CheckedIndexer haystack, Py_ssize_t len_haystack, goto windowloop; } } + // Check the left half of the pattern for a match. for (Py_ssize_t i = 0; i < cut; i++) { if (needle[i] != window[i]) { LOG("Left half does not match.\n"); @@ -549,38 +881,70 @@ _two_way(CheckedIndexer haystack, Py_ssize_t len_haystack, } +/** + * @file_internal + * @brief Finds the first occurrence of a needle (substring) within a haystack (string). + * + * This function applies the two-way string matching algorithm to efficiently + * search for a needle (substring) within a haystack (main string). + * + * @tparam char_type The character type of the strings. + * @param haystack The string in which to search for the needle. + * @param len_haystack The length of the haystack string. + * @param needle The substring to search for in the haystack. + * @param len_needle The length of the needle substring. + * @return The position of the first occurrence of the needle in the haystack, + * or -1 if the needle is not found. + */ template static inline Py_ssize_t -_two_way_find(CheckedIndexer haystack, Py_ssize_t len_haystack, +two_way_find(CheckedIndexer haystack, Py_ssize_t len_haystack, CheckedIndexer needle, Py_ssize_t len_needle) { LOG("###### Finding \"%s\" in \"%s\".\n", needle, haystack); prework p; - _preprocess(needle, len_needle, &p); - return _two_way(haystack, len_haystack, &p); + preprocess(needle, len_needle, &p); + return two_way(haystack, len_haystack, &p); } +/** + * @file_internal + * @brief Counts the occurrences of a needle (substring) within a haystack (string). 
+ * + * This function applies the two-way string matching algorithm to count how many + * times a needle (substring) appears within a haystack (main string). It stops + * counting when the maximum number of occurrences (`max_count`) is reached. + * + * @tparam char_type The character type of the strings. + * @param haystack The string in which to search for occurrences of the needle. + * @param len_haystack The length of the haystack string. + * @param needle The substring to search for in the haystack. + * @param len_needle The length of the needle substring. + * @param max_count The maximum number of occurrences to count before returning. + * @return The number of occurrences of the needle in the haystack. + * If the maximum count is reached, it returns `max_count`. + */ template static inline Py_ssize_t -_two_way_count(CheckedIndexer haystack, Py_ssize_t len_haystack, +two_way_count(CheckedIndexer haystack, Py_ssize_t len_haystack, CheckedIndexer needle, Py_ssize_t len_needle, - Py_ssize_t maxcount) + Py_ssize_t max_count) { LOG("###### Counting \"%s\" in \"%s\".\n", needle, haystack); prework p; - _preprocess(needle, len_needle, &p); + preprocess(needle, len_needle, &p); Py_ssize_t index = 0, count = 0; while (1) { Py_ssize_t result; - result = _two_way(haystack + index, - len_haystack - index, &p); + result = two_way(haystack + index, + len_haystack - index, &p); if (result == -1) { return count; } count++; - if (count == maxcount) { - return maxcount; + if (count == max_count) { + return max_count; } index += result + len_needle; } @@ -588,8 +952,8 @@ _two_way_count(CheckedIndexer haystack, Py_ssize_t len_haystack, } #undef SHIFT_TYPE -#undef NOT_FOUND -#undef SHIFT_OVERFLOW +#undef MAX_SHIFT + #undef TABLE_SIZE_BITS #undef TABLE_SIZE #undef TABLE_MASK @@ -598,11 +962,35 @@ _two_way_count(CheckedIndexer haystack, Py_ssize_t len_haystack, #undef LOG_STRING #undef LOG_LINEUP +/** + * @internal + * @brief A function that searches for a substring `p` in the + * string `s` using a bloom filter to optimize character matching. + * + * This function searches for occurrences of a pattern `p` in + * the given string `s`. It uses a bloom filter for fast rejection + * of non-matching characters and performs character-by-character + * comparison for potential matches. The algorithm is based on the + * Boyer-Moore string search technique. + * + * @tparam char_type The type of characters in the strings. + * @param s The haystack (string) to search in. + * @param n The length of the haystack string `s`. + * @param p The needle (substring) to search for. + * @param m The length of the needle substring `p`. + * @param max_count The maximum number of matches to return. + * @param mode The search mode. + * If mode is `FAST_COUNT`, the function counts occurrences of the + * pattern, otherwise it returns the index of the first match. + * @return If mode is not `FAST_COUNT`, returns the index of the first + * occurrence, or `-1` if no match is found. If `FAST_COUNT`, + * returns the number of occurrences found up to `max_count`. + */ template static inline Py_ssize_t default_find(CheckedIndexer s, Py_ssize_t n, CheckedIndexer p, Py_ssize_t m, - Py_ssize_t maxcount, int mode) + Py_ssize_t max_count, int mode) { const Py_ssize_t w = n - m; Py_ssize_t mlast = m - 1, count = 0; @@ -610,6 +998,7 @@ default_find(CheckedIndexer s, Py_ssize_t n, const char_type last = p[mlast]; CheckedIndexer ss = s + mlast; + // Add pattern to bloom filter and calculate the gap. 
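default_find seeds an unsigned-long bloom mask with every pattern character; a cleared bit proves a character cannot occur in the pattern, so the window may slide straight past it. A Python model of the two macros (64 assumes an LP64 unsigned long, per the STRINGLIB_BLOOM_WIDTH logic above):

    BLOOM_WIDTH = 64   # STRINGLIB_BLOOM_WIDTH on a 64-bit long

    def bloom_add(mask, ch):
        return mask | (1 << (ord(ch) & (BLOOM_WIDTH - 1)))

    def bloom_has(mask, ch):
        return bool(mask & (1 << (ord(ch) & (BLOOM_WIDTH - 1))))

    mask = 0
    for c in "needle":
        mask = bloom_add(mask, c)

    print(bloom_has(mask, "e"))   # True
    print(bloom_has(mask, "z"))   # False: definitely absent, safe to skip;
                                  # collisions only cost extra comparisons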
unsigned long mask = 0; for (Py_ssize_t i = 0; i < mlast; i++) { STRINGLIB_BLOOM_ADD(mask, p[i]); @@ -634,8 +1023,8 @@ default_find(CheckedIndexer s, Py_ssize_t n, return i; } count++; - if (count == maxcount) { - return maxcount; + if (count == max_count) { + return max_count; } i = i + mlast; continue; @@ -659,11 +1048,26 @@ default_find(CheckedIndexer s, Py_ssize_t n, } +/** + * @internal + * @brief Performs an adaptive string search using a bloom filter and fallback + * to two-way search for large data. + * + * @tparam char_type The type of characters in the string. + * @param s The haystack to search in. + * @param n Length of the haystack. + * @param p The needle to search for. + * @param m Length of the needle. + * @param max_count Maximum number of matches to count. + * @param mode Search mode. + * @return The index of the first occurrence of the needle, or -1 if not found. + * If in FAST_COUNT mode, returns the number of matches found up to max_count. + */ template static Py_ssize_t adaptive_find(CheckedIndexer s, Py_ssize_t n, CheckedIndexer p, Py_ssize_t m, - Py_ssize_t maxcount, int mode) + Py_ssize_t max_count, int mode) { const Py_ssize_t w = n - m; Py_ssize_t mlast = m - 1, count = 0; @@ -696,8 +1100,8 @@ adaptive_find(CheckedIndexer s, Py_ssize_t n, return i; } count++; - if (count == maxcount) { - return maxcount; + if (count == max_count) { + return max_count; } i = i + mlast; continue; @@ -705,11 +1109,11 @@ adaptive_find(CheckedIndexer s, Py_ssize_t n, hits += j + 1; if (hits > m / 4 && w - i > 2000) { if (mode == FAST_SEARCH) { - res = _two_way_find(s + i, n - i, p, m); + res = two_way_find(s + i, n - i, p, m); return res == -1 ? -1 : res + i; } else { - res = _two_way_count(s + i, n - i, p, m, maxcount - count); + res = two_way_count(s + i, n - i, p, m, max_count - count); return res + count; } } @@ -732,11 +1136,28 @@ adaptive_find(CheckedIndexer s, Py_ssize_t n, } +/** + * @internal + * @brief Performs a reverse Boyer-Moore string search. + * + * This function searches for the last occurrence of a pattern in a string, + * utilizing the Boyer-Moore algorithm with a bloom filter for fast skipping + * of mismatches. + * + * @tparam char_type The type of characters in the string (e.g., char, wchar_t). + * @param s The haystack to search in. + * @param n Length of the haystack. + * @param p The needle (pattern) to search for. + * @param m Length of the needle (pattern). + * @param max_count Maximum number of matches to count (not used in this version). + * @param mode Search mode (not used, only support right find mode). + * @return The index of the last occurrence of the needle, or -1 if not found. + */ template static Py_ssize_t default_rfind(CheckedIndexer s, Py_ssize_t n, CheckedIndexer p, Py_ssize_t m, - Py_ssize_t maxcount, int mode) + Py_ssize_t max_count, int mode) { /* create compressed boyer-moore delta 1 table */ unsigned long mask = 0; @@ -783,17 +1204,32 @@ default_rfind(CheckedIndexer s, Py_ssize_t n, } +/** + * @internal + * @brief Counts occurrences of a specified character in a given string. + * + * This function iterates through the string `s` and counts how many times + * the character `p0` appears, stopping when the count reaches `max_count`. + * + * @tparam char_type The type of characters in the string. + * @param s The string in which to count occurrences of the character. + * @param n The length of the string `s`. + * @param p0 The character to count in the string. + * @param max_count The maximum number of occurrences to count before stopping. 
+ * @return The total count of occurrences of `p0` in `s`, or `max_count` + * if that many occurrences were found. + */ template static inline Py_ssize_t countchar(CheckedIndexer s, Py_ssize_t n, - const char_type p0, Py_ssize_t maxcount) + const char_type p0, Py_ssize_t max_count) { Py_ssize_t i, count = 0; for (i = 0; i < n; i++) { if (s[i] == p0) { count++; - if (count == maxcount) { - return maxcount; + if (count == max_count) { + return max_count; } } } @@ -801,16 +1237,40 @@ countchar(CheckedIndexer s, Py_ssize_t n, } +/** + * @internal + * @brief Searches for occurrences of a substring `p` in the string `s` + * using various optimized search algorithms. + * + * This function determines the most appropriate searching method based on + * the lengths of the input string `s` and the pattern `p`, as well as the + * specified search mode. It handles special cases for patterns of length 0 or 1 + * and selects between default, two-way, adaptive, or reverse search algorithms. + * + * @tparam char_type The type of characters in the strings. + * @param s The haystack (string) to search in. + * @param n The length of the haystack string `s`. + * @param p The needle (substring) to search for. + * @param m The length of the needle substring `p`. + * @param max_count The maximum number of matches to return. + * @param mode The search mode, which can be: + * - `FAST_SEARCH`: Searches for the first occurrence. + * - `FAST_RSEARCH`: Searches for the last occurrence. + * - `FAST_COUNT`: Counts occurrences of the pattern. + * @return If `mode` is not `FAST_COUNT`, returns the index of the first occurrence + * of `p` in `s`, or `-1` if no match is found. If `FAST_COUNT`, returns + * the number of occurrences found up to `max_count`. + */ template inline Py_ssize_t fastsearch(char_type* s, Py_ssize_t n, char_type* p, Py_ssize_t m, - Py_ssize_t maxcount, int mode) + Py_ssize_t max_count, int mode) { CheckedIndexer s_(s, n); CheckedIndexer p_(p, m); - if (n < m || (mode == FAST_COUNT && maxcount == 0)) { + if (n < m || (mode == FAST_COUNT && max_count == 0)) { return -1; } @@ -821,17 +1281,17 @@ fastsearch(char_type* s, Py_ssize_t n, } /* use special case for 1-character strings */ if (mode == FAST_SEARCH) - return findchar(s_, n, p_[0]); + return find_char(s_, n, p_[0]); else if (mode == FAST_RSEARCH) - return rfindchar(s_, n, p_[0]); + return rfind_char(s_, n, p_[0]); else { - return countchar(s_, n, p_[0], maxcount); + return countchar(s_, n, p_[0], max_count); } } if (mode != FAST_RSEARCH) { if (n < 2500 || (m < 100 && n < 30000) || m < 6) { - return default_find(s_, n, p_, m, maxcount, mode); + return default_find(s_, n, p_, m, max_count, mode); } else if ((m >> 2) * 3 < (n >> 2)) { /* 33% threshold, but don't overflow. */ @@ -840,24 +1300,25 @@ fastsearch(char_type* s, Py_ssize_t n, expensive O(m) startup cost of the two-way algorithm will surely pay off. */ if (mode == FAST_SEARCH) { - return _two_way_find(s_, n, p_, m); + return two_way_find(s_, n, p_, m); } else { - return _two_way_count(s_, n, p_, m, maxcount); + return two_way_count(s_, n, p_, m, max_count); } } else { + // ReSharper restore CppRedundantElseKeyword /* To ensure that we have good worst-case behavior, here's an adaptive version of the algorithm, where if we match O(m) characters without any matches of the entire needle, then we predict that the startup cost of the two-way algorithm will probably be worth it. 
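fastsearch() below dispatches between the algorithms purely on size heuristics. Restated as a Python sketch, with thresholds copied from the code (single-character needles are handled even earlier, by find_char/rfind_char/countchar):

    def choose_search(n, m, mode):
        """Mirror of fastsearch()'s dispatch for needles with m > 1."""
        if mode == "FAST_RSEARCH":
            return "default_rfind"
        if n < 2500 or (m < 100 and n < 30000) or m < 6:
            return "default_find"            # bloom-filter scan
        if (m >> 2) * 3 < (n >> 2):          # needle under ~33% of haystack
            return "two_way_find/count"      # O(m) setup pays off
        return "adaptive_find"               # starts cheap, upgrades mid-scan

    print(choose_search(1_000, 10, "FAST_SEARCH"))        # default_find
    print(choose_search(100_000, 5_000, "FAST_SEARCH"))   # two_way_find/count
    print(choose_search(100_000, 90_000, "FAST_SEARCH"))  # adaptive_find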
*/ - return adaptive_find(s_, n, p_, m, maxcount, mode); + return adaptive_find(s_, n, p_, m, max_count, mode); } } else { /* FAST_RSEARCH */ - return default_rfind(s_, n, p_, m, maxcount, mode); + return default_rfind(s_, n, p_, m, max_count, mode); } } diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index 2bc4ce20acd6..0e28240ee5f0 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -643,6 +643,20 @@ string_addition_resolve_descriptors( PyArray_Descr *loop_descrs[3], npy_intp *NPY_UNUSED(view_offset)) { + npy_intp result_itemsize = given_descrs[0]->elsize + given_descrs[1]->elsize; + + /* NOTE: elsize can fit more than MAX_INT, but some code may still use ints */ + if (result_itemsize > NPY_MAX_INT || result_itemsize < 0) { + npy_intp length = result_itemsize; + if (given_descrs[0]->type == NPY_UNICODE) { + length /= 4; + } + PyErr_Format(PyExc_TypeError, + "addition result string of length %zd is too large to store inside array.", + length); + return _NPY_ERROR_OCCURRED_IN_CAST; + } + loop_descrs[0] = NPY_DT_CALL_ensure_canonical(given_descrs[0]); if (loop_descrs[0] == NULL) { return _NPY_ERROR_OCCURRED_IN_CAST; @@ -650,11 +664,14 @@ string_addition_resolve_descriptors( loop_descrs[1] = NPY_DT_CALL_ensure_canonical(given_descrs[1]); if (loop_descrs[1] == NULL) { + Py_DECREF(loop_descrs[0]); return _NPY_ERROR_OCCURRED_IN_CAST; } loop_descrs[2] = PyArray_DescrNew(loop_descrs[0]); if (loop_descrs[2] == NULL) { + Py_DECREF(loop_descrs[0]); + Py_DECREF(loop_descrs[1]); return _NPY_ERROR_OCCURRED_IN_CAST; } loop_descrs[2]->elsize += loop_descrs[1]->elsize; diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index ed9f62077589..8e25b3968cfe 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -1598,6 +1598,20 @@ string_expandtabs_strided_loop(PyArrayMethod_Context *context, return -1; } +static int +string_center_ljust_rjust_promoter( + PyObject *NPY_UNUSED(ufunc), + PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], + PyArray_DTypeMeta *new_op_dtypes[]) +{ + new_op_dtypes[0] = NPY_DT_NewRef(&PyArray_StringDType); + new_op_dtypes[1] = NPY_DT_NewRef(&PyArray_Int64DType); + new_op_dtypes[2] = NPY_DT_NewRef(&PyArray_StringDType); + new_op_dtypes[3] = NPY_DT_NewRef(&PyArray_StringDType); + return 0; +} + static NPY_CASTING center_ljust_rjust_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), @@ -2595,10 +2609,17 @@ init_stringdtype_ufuncs(PyObject *umath) "find", "rfind", "index", "rindex", "count", }; - PyArray_DTypeMeta *findlike_promoter_dtypes[] = { - &PyArray_StringDType, &PyArray_UnicodeDType, - &PyArray_IntAbstractDType, &PyArray_IntAbstractDType, - &PyArray_DefaultIntDType, + PyArray_DTypeMeta *findlike_promoter_dtypes[2][5] = { + { + &PyArray_StringDType, &PyArray_UnicodeDType, + &PyArray_IntAbstractDType, &PyArray_IntAbstractDType, + &PyArray_IntAbstractDType, + }, + { + &PyArray_UnicodeDType, &PyArray_StringDType, + &PyArray_IntAbstractDType, &PyArray_IntAbstractDType, + &PyArray_IntAbstractDType, + }, }; find_like_function *findlike_functions[] = { @@ -2618,11 +2639,12 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - - if (add_promoter(umath, findlike_names[i], - findlike_promoter_dtypes, - 5, string_findlike_promoter) < 0) { - return -1; + for (int j=0; j<2; j++) { + if (add_promoter(umath, findlike_names[i], + 
findlike_promoter_dtypes[j], + 5, string_findlike_promoter) < 0) { + return -1; + } } } @@ -2636,10 +2658,17 @@ init_stringdtype_ufuncs(PyObject *umath) "startswith", "endswith", }; - PyArray_DTypeMeta *startswith_endswith_promoter_dtypes[] = { - &PyArray_StringDType, &PyArray_UnicodeDType, - &PyArray_IntAbstractDType, &PyArray_IntAbstractDType, - &PyArray_BoolDType, + PyArray_DTypeMeta *startswith_endswith_promoter_dtypes[2][5] = { + { + &PyArray_StringDType, &PyArray_UnicodeDType, + &PyArray_IntAbstractDType, &PyArray_IntAbstractDType, + &PyArray_BoolDType, + }, + { + &PyArray_UnicodeDType, &PyArray_StringDType, + &PyArray_IntAbstractDType, &PyArray_IntAbstractDType, + &PyArray_BoolDType, + }, }; static STARTPOSITION startswith_endswith_startposition[] = { @@ -2656,11 +2685,12 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - - if (add_promoter(umath, startswith_endswith_names[i], - startswith_endswith_promoter_dtypes, - 5, string_startswith_endswith_promoter) < 0) { - return -1; + for (int j=0; j<2; j++) { + if (add_promoter(umath, startswith_endswith_names[i], + startswith_endswith_promoter_dtypes[j], + 5, string_startswith_endswith_promoter) < 0) { + return -1; + } } } @@ -2732,24 +2762,38 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - PyArray_DTypeMeta *replace_promoter_pyint_dtypes[] = { - &PyArray_StringDType, &PyArray_UnicodeDType, &PyArray_UnicodeDType, - &PyArray_IntAbstractDType, &PyArray_StringDType, - }; - - if (add_promoter(umath, "_replace", replace_promoter_pyint_dtypes, 5, - string_replace_promoter) < 0) { - return -1; - } - - PyArray_DTypeMeta *replace_promoter_int64_dtypes[] = { - &PyArray_StringDType, &PyArray_UnicodeDType, &PyArray_UnicodeDType, - &PyArray_Int64DType, &PyArray_StringDType, + PyArray_DTypeMeta *replace_promoter_unicode_dtypes[6][5] = { + { + &PyArray_StringDType, &PyArray_UnicodeDType, &PyArray_UnicodeDType, + &PyArray_IntAbstractDType, &PyArray_StringDType, + }, + { + &PyArray_UnicodeDType, &PyArray_StringDType, &PyArray_UnicodeDType, + &PyArray_IntAbstractDType, &PyArray_StringDType, + }, + { + &PyArray_UnicodeDType, &PyArray_UnicodeDType, &PyArray_StringDType, + &PyArray_IntAbstractDType, &PyArray_StringDType, + }, + { + &PyArray_StringDType, &PyArray_StringDType, &PyArray_UnicodeDType, + &PyArray_IntAbstractDType, &PyArray_StringDType, + }, + { + &PyArray_StringDType, &PyArray_UnicodeDType, &PyArray_StringDType, + &PyArray_IntAbstractDType, &PyArray_StringDType, + }, + { + &PyArray_UnicodeDType, &PyArray_StringDType, &PyArray_StringDType, + &PyArray_IntAbstractDType, &PyArray_StringDType, + }, }; - if (add_promoter(umath, "_replace", replace_promoter_int64_dtypes, 5, - string_replace_promoter) < 0) { - return -1; + for (int j=0; j<6; j++) { + if (add_promoter(umath, "_replace", replace_promoter_unicode_dtypes[j], 5, + string_replace_promoter) < 0) { + return -1; + } } PyArray_DTypeMeta *expandtabs_dtypes[] = { @@ -2767,9 +2811,9 @@ init_stringdtype_ufuncs(PyObject *umath) } PyArray_DTypeMeta *expandtabs_promoter_dtypes[] = { - &PyArray_StringDType, - (PyArray_DTypeMeta *)Py_None, - &PyArray_StringDType + &PyArray_StringDType, + &PyArray_IntAbstractDType, + &PyArray_StringDType }; if (add_promoter(umath, "_expandtabs", expandtabs_promoter_dtypes, @@ -2801,30 +2845,33 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - PyArray_DTypeMeta *int_promoter_dtypes[] = { - &PyArray_StringDType, - (PyArray_DTypeMeta *)Py_None, - &PyArray_StringDType, - &PyArray_StringDType, - }; - - if (add_promoter(umath, 
center_ljust_rjust_names[i], - int_promoter_dtypes, 4, - string_multiply_promoter) < 0) { - return -1; - } - - PyArray_DTypeMeta *unicode_promoter_dtypes[] = { - &PyArray_StringDType, - (PyArray_DTypeMeta *)Py_None, - &PyArray_UnicodeDType, - &PyArray_StringDType, + PyArray_DTypeMeta *promoter_dtypes[3][4] = { + { + &PyArray_StringDType, + &PyArray_IntAbstractDType, + &PyArray_StringDType, + &PyArray_StringDType, + }, + { + &PyArray_StringDType, + &PyArray_IntAbstractDType, + &PyArray_UnicodeDType, + &PyArray_StringDType, + }, + { + &PyArray_UnicodeDType, + &PyArray_IntAbstractDType, + &PyArray_StringDType, + &PyArray_StringDType, + }, }; - if (add_promoter(umath, center_ljust_rjust_names[i], - unicode_promoter_dtypes, 4, - string_multiply_promoter) < 0) { - return -1; + for (int j=0; j<3; j++) { + if (add_promoter(umath, center_ljust_rjust_names[i], + promoter_dtypes[j], 4, + string_center_ljust_rjust_promoter) < 0) { + return -1; + } } } @@ -2840,13 +2887,13 @@ init_stringdtype_ufuncs(PyObject *umath) return -1; } - PyArray_DTypeMeta *int_promoter_dtypes[] = { + PyArray_DTypeMeta *zfill_promoter_dtypes[] = { &PyArray_StringDType, - (PyArray_DTypeMeta *)Py_None, + &PyArray_IntAbstractDType, &PyArray_StringDType, }; - if (add_promoter(umath, "_zfill", int_promoter_dtypes, 3, + if (add_promoter(umath, "_zfill", zfill_promoter_dtypes, 3, string_multiply_promoter) < 0) { return -1; } diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 6bd02b0fec87..69bb0b1eb197 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -664,12 +664,6 @@ convert_ufunc_arguments(PyUFuncObject *ufunc, continue; } - // TODO: Is this equivalent/better by removing the logic which enforces - // that we always use weak promotion in the core? - if (get_npy_promotion_state() == NPY_USE_LEGACY_PROMOTION) { - continue; /* Skip use of special dtypes */ - } - /* * Handle the "weak" Python scalars/literals. We use a special DType * for these. @@ -1114,7 +1108,7 @@ execute_ufunc_loop(PyArrayMethod_Context *context, int masked, * based on the fixed strides. 
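The promoter tables registered in the preceding hunks enumerate each StringDType/unicode operand order explicitly, and route center/ljust/rjust through the new dedicated promoter. A usage sketch, assuming a NumPy build that carries these patches (2.2 or later); the exact outputs are what the promoters are meant to enable, not documented guarantees:

    import numpy as np

    S = np.dtypes.StringDType()
    s = np.array(["alpha", "beta"], dtype=S)
    u = np.array(["al", "be"], dtype="U2")

    print(np.strings.find(s, u))          # [0 0]: StringDType haystack, U needle
    print(np.strings.find(u, s))          # [-1 -1]: the mirrored promoter entry
    print(np.strings.center(s, 9, "*"))   # unicode fill promoted via the table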
*/ PyArrayMethod_StridedLoop *strided_loop; - NpyAuxData *auxdata; + NpyAuxData *auxdata = NULL; npy_intp fixed_strides[NPY_MAXARGS]; NpyIter_GetInnerFixedStrideArray(iter, fixed_strides); @@ -2599,6 +2593,10 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, int idim, ndim; int needs_api, need_outer_iterator; int res = 0; + + NPY_cast_info copy_info; + NPY_cast_info_init(©_info); + #if NPY_UF_DBG_TRACING const char *ufunc_name = ufunc_get_name_cstr(ufunc); #endif @@ -2643,14 +2641,6 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, assert(PyArray_EquivTypes(descrs[0], descrs[1]) && PyArray_EquivTypes(descrs[0], descrs[2])); - if (PyDataType_REFCHK(descrs[2]) && descrs[2]->type_num != NPY_OBJECT) { - /* This can be removed, but the initial element copy needs fixing */ - PyErr_SetString(PyExc_TypeError, - "accumulation currently only supports `object` dtype with " - "references"); - goto fail; - } - PyArrayMethod_Context context = { .caller = (PyObject *)ufunc, .method = ufuncimpl, @@ -2746,10 +2736,10 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, else { PyArray_Descr *dtype = descrs[0]; Py_INCREF(dtype); - op[0] = out = (PyArrayObject *)PyArray_NewFromDescr( + op[0] = out = (PyArrayObject *)PyArray_NewFromDescr_int( &PyArray_Type, dtype, ndim, PyArray_DIMS(op[1]), NULL, NULL, - 0, NULL); + 0, NULL, NULL, _NPY_ARRAY_ENSURE_DTYPE_IDENTITY); if (out == NULL) { goto fail; } @@ -2772,6 +2762,18 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, 1, 0, fixed_strides, &strided_loop, &auxdata, &flags) < 0) { goto fail; } + /* Set up function to copy the first element if it has references */ + if (PyDataType_REFCHK(descrs[2])) { + NPY_ARRAYMETHOD_FLAGS copy_flags; + /* Setup guarantees aligned here. */ + if (PyArray_GetDTypeTransferFunction( + 1, 0, 0, descrs[1], descrs[2], 0, ©_info, + ©_flags) == NPY_FAIL) { + goto fail; + } + flags = PyArrayMethod_COMBINED_FLAGS(flags, copy_flags); + } + needs_api = (flags & NPY_METH_REQUIRES_PYAPI) != 0; if (!(flags & NPY_METH_NO_FLOATINGPOINT_ERRORS)) { /* Start with the floating-point exception flags cleared */ @@ -2835,18 +2837,17 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, * Output (dataptr[0]) and input (dataptr[1]) may point to * the same memory, e.g. np.add.accumulate(a, out=a). */ - if (descrs[2]->type_num == NPY_OBJECT) { - /* - * Incref before decref to avoid the possibility of the - * reference count being zero temporarily. - */ - Py_XINCREF(*(PyObject **)dataptr_copy[1]); - Py_XDECREF(*(PyObject **)dataptr_copy[0]); - *(PyObject **)dataptr_copy[0] = - *(PyObject **)dataptr_copy[1]; + if (copy_info.func) { + const npy_intp one = 1; + if (copy_info.func( + ©_info.context, &dataptr_copy[1], &one, + &stride_copy[1], copy_info.auxdata) < 0) { + NPY_END_THREADS; + goto fail; + } } else { - memmove(dataptr_copy[0], dataptr_copy[1], itemsize); + memmove(dataptr_copy[2], dataptr_copy[1], itemsize); } if (count_m1 > 0) { @@ -2895,18 +2896,17 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, * Output (dataptr[0]) and input (dataptr[1]) may point to the * same memory, e.g. np.add.accumulate(a, out=a). */ - if (descrs[2]->type_num == NPY_OBJECT) { - /* - * Incref before decref to avoid the possibility of the - * reference count being zero temporarily. 
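Routing the initial-element copy through a dtype transfer function (the copy_info set up above) lifts the old object-only restriction on accumulating dtypes that hold references. A sketch of the intended effect; treat the StringDType line as an assumption about the patched NumPy rather than documented API:

    import numpy as np

    o = np.array(["a", "b", "c"], dtype=object)
    print(np.add.accumulate(o))    # ['a' 'ab' 'abc']: object worked before

    s = np.array(["a", "b", "c"], dtype=np.dtypes.StringDType())
    print(np.add.accumulate(s))    # expected to match, now that the
                                   # first-element copy handles references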
- */ - Py_XINCREF(*(PyObject **)dataptr_copy[1]); - Py_XDECREF(*(PyObject **)dataptr_copy[0]); - *(PyObject **)dataptr_copy[0] = - *(PyObject **)dataptr_copy[1]; + if (copy_info.func) { + const npy_intp one = 1; + const npy_intp strides[2] = {itemsize, itemsize}; + if (copy_info.func( + ©_info.context, &dataptr_copy[1], &one, + strides, copy_info.auxdata) < 0) { + goto fail; + } } else { - memmove(dataptr_copy[0], dataptr_copy[1], itemsize); + memmove(dataptr_copy[2], dataptr_copy[1], itemsize); } if (count > 1) { @@ -2916,8 +2916,6 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, NPY_UF_DBG_PRINT1("iterator loop count %d\n", (int)count); - needs_api = PyDataType_REFCHK(descrs[0]); - if (!needs_api) { NPY_BEGIN_THREADS_THRESHOLDED(count); } @@ -2931,6 +2929,7 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, finish: NPY_AUXDATA_FREE(auxdata); + NPY_cast_info_xfree(©_info); Py_DECREF(descrs[0]); Py_DECREF(descrs[1]); Py_DECREF(descrs[2]); @@ -2955,6 +2954,8 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out, Py_XDECREF(out); NPY_AUXDATA_FREE(auxdata); + NPY_cast_info_xfree(©_info); + Py_XDECREF(descrs[0]); Py_XDECREF(descrs[1]); Py_XDECREF(descrs[2]); @@ -3011,7 +3012,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind, const char *ufunc_name = ufunc_get_name_cstr(ufunc); char *opname = "reduceat"; - /* These parameters comefrom a TLS global */ + /* These parameters come from a TLS global */ int buffersize = 0, errormask = 0; NPY_BEGIN_THREADS_DEF; @@ -4199,7 +4200,7 @@ resolve_descriptors(int nop, * None --- array-object passed in don't call PyArray_Return * method --- the __array_wrap__ method to call. * - * @param ufunc + * @param ufunc The universal function to be wrapped * @param full_args Original inputs and outputs * @param subok Whether subclasses are allowed * @param result_arrays The ufunc result(s). REFERENCES ARE STOLEN! @@ -4693,6 +4694,7 @@ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, voi ufunc->core_signature = NULL; ufunc->core_enabled = 0; ufunc->obj = NULL; + ufunc->dict = NULL; ufunc->core_num_dims = NULL; ufunc->core_num_dim_ix = 0; ufunc->core_offsets = NULL; @@ -4777,6 +4779,11 @@ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, voi return NULL; } } + ufunc->dict = PyDict_New(); + if (ufunc->dict == NULL) { + Py_DECREF(ufunc); + return NULL; + } /* * TODO: I tried adding a default promoter here (either all object for * some special cases, or all homogeneous). 
Those are reasonable @@ -5183,6 +5190,7 @@ ufunc_dealloc(PyUFuncObject *ufunc) Py_DECREF(ufunc->identity_value); } Py_XDECREF(ufunc->obj); + Py_XDECREF(ufunc->dict); Py_XDECREF(ufunc->_loops); if (ufunc->_dispatch_cache != NULL) { PyArrayIdentityHash_Dealloc(ufunc->_dispatch_cache); @@ -5203,6 +5211,7 @@ ufunc_traverse(PyUFuncObject *self, visitproc visit, void *arg) if (self->identity == PyUFunc_IdentityValue) { Py_VISIT(self->identity_value); } + Py_VISIT(self->dict); return 0; } @@ -5954,7 +5963,6 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) NPY_AUXDATA_FREE(auxdata); Py_XDECREF(op2_array); - Py_XDECREF(iter); Py_XDECREF(iter2); for (int i = 0; i < nop; i++) { Py_XDECREF(operation_descrs[i]); @@ -5970,9 +5978,13 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) if (PyArray_FLAGS(op1_array) & NPY_ARRAY_WRITEBACKIFCOPY) { PyArray_DiscardWritebackIfCopy(op1_array); } + // iter might own the last reference to op1_array, + // so it must be decref'd second + Py_XDECREF(iter); return NULL; } else { + Py_XDECREF(iter); Py_RETURN_NONE; } } @@ -6065,10 +6077,6 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, PyArray_DTypeMeta *signature[NPY_MAXARGS] = {NULL}; PyArray_Descr *operation_descrs[NPY_MAXARGS] = {NULL}; - /* This entry-point to promotion lives in the NEP 50 future: */ - int original_promotion_state = get_npy_promotion_state(); - set_npy_promotion_state(NPY_USE_WEAK_PROMOTION); - npy_bool promoting_pyscalars = NPY_FALSE; if (_get_fixed_signature(ufunc, NULL, signature_obj, signature) < 0) { @@ -6250,8 +6258,6 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, Py_DECREF(capsule); finish: - set_npy_promotion_state(original_promotion_state); - Py_XDECREF(result_dtype_tuple); for (int i = 0; i < ufunc->nargs; i++) { Py_XDECREF(signature[i]); @@ -6423,6 +6429,15 @@ ufunc_get_doc(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) { PyObject *doc; + // If there is a __doc__ in the instance __dict__, use it.
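The promotion-state save/restore deleted above, together with the resolver changes in the next hunks, reflects that NEP 50 weak promotion is now the only mode. The observable behavior, for reference:

    import numpy as np

    a = np.array([1.0], dtype=np.float32)
    print((a + 3.0).dtype)               # float32: Python float is "weak"
    print((a + np.float64(3.0)).dtype)   # float64: NumPy scalar keeps its dtype

    u = np.array([200], dtype=np.uint8)
    print(u + np.array([100], dtype=np.uint8))   # [44]: wraps, no upcast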
+ int result = PyDict_GetItemRef(ufunc->dict, npy_interned_str.__doc__, &doc); + if (result == -1) { + return NULL; + } + else if (result == 1) { + return doc; + } + if (npy_cache_import_runtime( "numpy._core._internal", "_ufunc_doc_signature_formatter", &npy_runtime_imports._ufunc_doc_signature_formatter) == -1) { @@ -6446,6 +6461,15 @@ ufunc_get_doc(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) return doc; } +static int +ufunc_set_doc(PyUFuncObject *ufunc, PyObject *doc, void *NPY_UNUSED(ignored)) +{ + if (doc == NULL) { + return PyDict_DelItem(ufunc->dict, npy_interned_str.__doc__); + } else { + return PyDict_SetItem(ufunc->dict, npy_interned_str.__doc__, doc); + } +} static PyObject * ufunc_get_nin(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) @@ -6531,8 +6555,8 @@ ufunc_get_signature(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) static PyGetSetDef ufunc_getset[] = { {"__doc__", - (getter)ufunc_get_doc, - NULL, NULL, NULL}, + (getter)ufunc_get_doc, (setter)ufunc_set_doc, + NULL, NULL}, {"nin", (getter)ufunc_get_nin, NULL, NULL, NULL}, @@ -6561,6 +6585,17 @@ static PyGetSetDef ufunc_getset[] = { }; +/****************************************************************************** + *** UFUNC MEMBERS *** + *****************************************************************************/ + +static PyMemberDef ufunc_members[] = { + {"__dict__", T_OBJECT, offsetof(PyUFuncObject, dict), + READONLY}, + {NULL}, +}; + + /****************************************************************************** *** UFUNC TYPE OBJECT *** *****************************************************************************/ @@ -6580,6 +6615,12 @@ NPY_NO_EXPORT PyTypeObject PyUFunc_Type = { .tp_traverse = (traverseproc)ufunc_traverse, .tp_methods = ufunc_methods, .tp_getset = ufunc_getset, + .tp_getattro = PyObject_GenericGetAttr, + .tp_setattro = PyObject_GenericSetAttr, + // TODO when Python 3.12 is the minimum supported version, + // use Py_TPFLAGS_MANAGED_DICT + .tp_members = ufunc_members, + .tp_dictoffset = offsetof(PyUFuncObject, dict), }; /* End of code for ufunc objects */ diff --git a/numpy/_core/src/umath/ufunc_object.h b/numpy/_core/src/umath/ufunc_object.h index f8e522374394..dc55a561fba5 100644 --- a/numpy/_core/src/umath/ufunc_object.h +++ b/numpy/_core/src/umath/ufunc_object.h @@ -3,6 +3,9 @@ #include +#ifdef __cplusplus +extern "C" { +#endif NPY_NO_EXPORT const char* ufunc_get_name_cstr(PyUFuncObject *ufunc); @@ -10,4 +13,8 @@ ufunc_get_name_cstr(PyUFuncObject *ufunc); NPY_NO_EXPORT PyObject * PyUFunc_GetDefaultIdentity(PyUFuncObject *ufunc, npy_bool *reorderable); +#ifdef __cplusplus +} +#endif + #endif diff --git a/numpy/_core/src/umath/ufunc_type_resolution.c b/numpy/_core/src/umath/ufunc_type_resolution.c index cabcff3b9bef..95670efb936f 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.c +++ b/numpy/_core/src/umath/ufunc_type_resolution.c @@ -1919,17 +1919,7 @@ linear_search_type_resolver(PyUFuncObject *self, ufunc_name = ufunc_get_name_cstr(self); - int promotion_state = get_npy_promotion_state(); - - assert(promotion_state != NPY_USE_WEAK_PROMOTION_AND_WARN); - /* Always "use" with new promotion in case of Python int/float/complex */ - int use_min_scalar; - if (promotion_state == NPY_USE_LEGACY_PROMOTION) { - use_min_scalar = should_use_min_scalar(nin, op, 0, NULL); - } - else { - use_min_scalar = should_use_min_scalar_weak_literals(nin, op); - } + int use_min_scalar = should_use_min_scalar_weak_literals(nin, op); /* If the ufunc has userloops, search for them. 
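ufunc_get_doc/ufunc_set_doc above consult the new per-instance __dict__ first, which makes ufunc docstrings writable. A sketch of the resulting behavior (my reading of these hunks, matching the NumPy 2.2 feature they implement):

    import numpy as np

    generated = np.add.__doc__
    np.add.__doc__ = "my add docs"          # stored in ufunc->dict
    assert np.add.__doc__ == "my add docs"

    del np.add.__doc__                      # drops the override ...
    assert np.add.__doc__ == generated      # ... restoring the generated doc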
*/ if (self->userloops) { @@ -2123,17 +2113,7 @@ type_tuple_type_resolver(PyUFuncObject *self, ufunc_name = ufunc_get_name_cstr(self); - int promotion_state = get_npy_promotion_state(); - - assert(promotion_state != NPY_USE_WEAK_PROMOTION_AND_WARN); - /* Always "use" with new promotion in case of Python int/float/complex */ - int use_min_scalar; - if (promotion_state == NPY_USE_LEGACY_PROMOTION) { - use_min_scalar = should_use_min_scalar(nin, op, 0, NULL); - } - else { - use_min_scalar = should_use_min_scalar_weak_literals(nin, op); - } + int use_min_scalar = should_use_min_scalar_weak_literals(nin, op); /* Fill in specified_types from the tuple or string */ const char *bad_type_tup_msg = ( @@ -2248,19 +2228,17 @@ PyUFunc_DivmodTypeResolver(PyUFuncObject *ufunc, return PyUFunc_DefaultTypeResolver(ufunc, casting, operands, type_tup, out_dtypes); } - if (type_num1 == NPY_TIMEDELTA) { - if (type_num2 == NPY_TIMEDELTA) { - out_dtypes[0] = PyArray_PromoteTypes(PyArray_DESCR(operands[0]), - PyArray_DESCR(operands[1])); - out_dtypes[1] = out_dtypes[0]; - Py_INCREF(out_dtypes[1]); - out_dtypes[2] = PyArray_DescrFromType(NPY_LONGLONG); - out_dtypes[3] = out_dtypes[0]; - Py_INCREF(out_dtypes[3]); - } - else { - return raise_binary_type_reso_error(ufunc, operands); + if (type_num1 == NPY_TIMEDELTA && type_num2 == NPY_TIMEDELTA) { + out_dtypes[0] = PyArray_PromoteTypes(PyArray_DESCR(operands[0]), + PyArray_DESCR(operands[1])); + if (out_dtypes[0] == NULL) { + return -1; } + out_dtypes[1] = out_dtypes[0]; + Py_INCREF(out_dtypes[1]); + out_dtypes[2] = PyArray_DescrFromType(NPY_LONGLONG); + out_dtypes[3] = out_dtypes[0]; + Py_INCREF(out_dtypes[3]); } else { return raise_binary_type_reso_error(ufunc, operands); diff --git a/numpy/_core/src/umath/ufunc_type_resolution.h b/numpy/_core/src/umath/ufunc_type_resolution.h index 3f8e7505ea39..9e812e97d6fe 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.h +++ b/numpy/_core/src/umath/ufunc_type_resolution.h @@ -1,6 +1,10 @@ #ifndef _NPY_PRIVATE__UFUNC_TYPE_RESOLUTION_H_ #define _NPY_PRIVATE__UFUNC_TYPE_RESOLUTION_H_ +#ifdef __cplusplus +extern "C" { +#endif + NPY_NO_EXPORT int PyUFunc_SimpleBinaryComparisonTypeResolver(PyUFuncObject *ufunc, NPY_CASTING casting, @@ -142,4 +146,8 @@ PyUFunc_DefaultLegacyInnerLoopSelector(PyUFuncObject *ufunc, NPY_NO_EXPORT int raise_no_loop_found_error(PyUFuncObject *ufunc, PyObject **dtypes); +#ifdef __cplusplus +} +#endif + #endif diff --git a/numpy/_core/src/umath/umathmodule.c b/numpy/_core/src/umath/umathmodule.c index 0c8fc4857ea7..e5cf2cf8acb3 100644 --- a/numpy/_core/src/umath/umathmodule.c +++ b/numpy/_core/src/umath/umathmodule.c @@ -167,6 +167,13 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) { PyObject * add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args) { + + /* 2024-11-12, NumPy 2.2 */ + if (DEPRECATE("_add_newdoc_ufunc is deprecated. 
" + "Use `ufunc.__doc__ = newdoc` instead.") < 0) { + return NULL; + } + PyUFuncObject *ufunc; PyObject *str; if (!PyArg_ParseTuple(args, "O!O!:_add_newdoc_ufunc", &PyUFunc_Type, &ufunc, diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index 0820411840ea..b751b5d773a0 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -10,6 +10,7 @@ add, multiply as _multiply_ufunc, ) from numpy._core.multiarray import _vec_string +from numpy._core.overrides import set_module from numpy._core.umath import ( isalpha, isdigit, @@ -48,6 +49,18 @@ ) +def _override___module__(): + for ufunc in [ + isalnum, isalpha, isdecimal, isdigit, islower, isnumeric, isspace, + istitle, isupper, str_len, + ]: + ufunc.__module__ = "numpy.strings" + ufunc.__qualname__ = ufunc.__name__ + + +_override___module__() + + __all__ = [ # UFuncs "equal", "not_equal", "less", "less_equal", "greater", "greater_equal", @@ -116,6 +129,7 @@ def _clean_args(*args): return newargs +@set_module("numpy.strings") def multiply(a, i): """ Return (a * i), that is string multiple concatenation, @@ -179,6 +193,7 @@ def multiply(a, i): return _multiply_ufunc(a, i, out=out) +@set_module("numpy.strings") def mod(a, values): """ Return (a % i), that is pre-Python 2.6 string formatting @@ -215,6 +230,7 @@ def mod(a, values): _vec_string(a, np.object_, '__mod__', (values,)), a) +@set_module("numpy.strings") def find(a, sub, start=0, end=None): """ For each element, return the lowest index in the string where @@ -252,6 +268,7 @@ def find(a, sub, start=0, end=None): return _find_ufunc(a, sub, start, end) +@set_module("numpy.strings") def rfind(a, sub, start=0, end=None): """ For each element, return the highest index in the string where @@ -294,6 +311,7 @@ def rfind(a, sub, start=0, end=None): return _rfind_ufunc(a, sub, start, end) +@set_module("numpy.strings") def index(a, sub, start=0, end=None): """ Like `find`, but raises :exc:`ValueError` when the substring is not found. @@ -327,6 +345,7 @@ def index(a, sub, start=0, end=None): return _index_ufunc(a, sub, start, end) +@set_module("numpy.strings") def rindex(a, sub, start=0, end=None): """ Like `rfind`, but raises :exc:`ValueError` when the substring `sub` is @@ -360,6 +379,7 @@ def rindex(a, sub, start=0, end=None): return _rindex_ufunc(a, sub, start, end) +@set_module("numpy.strings") def count(a, sub, start=0, end=None): """ Returns an array with the number of non-overlapping occurrences of @@ -404,6 +424,7 @@ def count(a, sub, start=0, end=None): return _count_ufunc(a, sub, start, end) +@set_module("numpy.strings") def startswith(a, prefix, start=0, end=None): """ Returns a boolean array which is `True` where the string element @@ -444,6 +465,7 @@ def startswith(a, prefix, start=0, end=None): return _startswith_ufunc(a, prefix, start, end) +@set_module("numpy.strings") def endswith(a, suffix, start=0, end=None): """ Returns a boolean array which is `True` where the string element @@ -484,6 +506,7 @@ def endswith(a, suffix, start=0, end=None): return _endswith_ufunc(a, suffix, start, end) +@set_module("numpy.strings") def decode(a, encoding=None, errors=None): r""" Calls :meth:`bytes.decode` element-wise. @@ -531,6 +554,7 @@ def decode(a, encoding=None, errors=None): np.str_('')) +@set_module("numpy.strings") def encode(a, encoding=None, errors=None): """ Calls :meth:`str.encode` element-wise. 
@@ -575,6 +599,7 @@ def encode(a, encoding=None, errors=None): np.bytes_(b'')) +@set_module("numpy.strings") def expandtabs(a, tabsize=8): """ Return a copy of each string element where all tab characters are @@ -626,6 +651,7 @@ def expandtabs(a, tabsize=8): return _expandtabs(a, tabsize, out=out) +@set_module("numpy.strings") def center(a, width, fillchar=' '): """ Return a copy of `a` with its elements centered in a string of @@ -669,23 +695,31 @@ def center(a, width, fillchar=' '): array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='>> import numpy as np @@ -1205,22 +1266,29 @@ def replace(a, old, new, count=-1): array(['The dwash was fresh', 'Thwas was it'], dtype='>> np.char.splitlines("first line\\nsecond line") + array(list(['first line', 'second line']), dtype=object) + >>> a = np.array(["first\\nsecond", "third\\nfourth"]) + >>> np.char.splitlines(a) + array([list(['first', 'second']), list(['third', 'fourth'])], dtype=object) + """ return _vec_string( a, np.object_, 'splitlines', _clean_args(keepends)) +@set_module("numpy.strings") def partition(a, sep): """ Partition each element in ``a`` around ``sep``. @@ -1421,11 +1498,12 @@ def partition(a, sep): """ a = np.asanyarray(a) - # TODO switch to copy=False when issues around views are fixed - sep = np.array(sep, dtype=a.dtype, copy=True, subok=True) - if a.dtype.char == "T": + sep = np.asanyarray(sep) + + if np.result_type(a, sep).char == "T": return _partition(a, sep) + sep = sep.astype(a.dtype, copy=False) pos = _find_ufunc(a, sep, 0, MAX) a_len = str_len(a) sep_len = str_len(sep) @@ -1444,6 +1522,7 @@ def partition(a, sep): return _partition_index(a, sep, pos, out=(out["f0"], out["f1"], out["f2"])) +@set_module("numpy.strings") def rpartition(a, sep): """ Partition (split) each element around the right-most separator. @@ -1487,11 +1566,12 @@ def rpartition(a, sep): """ a = np.asanyarray(a) - # TODO switch to copy=False when issues around views are fixed - sep = np.array(sep, dtype=a.dtype, copy=True, subok=True) - if a.dtype.char == "T": + sep = np.asanyarray(sep) + + if np.result_type(a, sep).char == "T": return _rpartition(a, sep) + sep = sep.astype(a.dtype, copy=False) pos = _rfind_ufunc(a, sep, 0, MAX) a_len = str_len(a) sep_len = str_len(sep) @@ -1511,6 +1591,7 @@ def rpartition(a, sep): a, sep, pos, out=(out["f0"], out["f1"], out["f2"])) +@set_module("numpy.strings") def translate(a, table, deletechars=None): """ For each element in `a`, return a copy of the string where all diff --git a/numpy/_core/strings.pyi b/numpy/_core/strings.pyi index 5e335c6f7d4a..b6c15b5c3ca3 100644 --- a/numpy/_core/strings.pyi +++ b/numpy/_core/strings.pyi @@ -1,4 +1,4 @@ -from typing import Any, overload +from typing import Any, overload, TypeAlias import numpy as np from numpy._typing import ( @@ -6,65 +6,97 @@ from numpy._typing import ( _ArrayLikeStr_co as U_co, _ArrayLikeBytes_co as S_co, _ArrayLikeInt_co as i_co, - _ArrayLikeBool_co as b_co, + _ArrayLikeString_co as T_co, + _ArrayLikeAnyString_co as UST_co, + _Shape, + _SupportsArray, ) + +_StringDTypeArray: TypeAlias = np.ndarray[_Shape, np.dtypes.StringDType] +_StringDTypeSupportsArray: TypeAlias = _SupportsArray[np.dtypes.StringDType] +_StringDTypeOrUnicodeArray: TypeAlias = np.ndarray[_Shape, np.dtype[np.str_]] | np.ndarray[_Shape, np.dtypes.StringDType] + @overload def equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... @overload def equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... 
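Taken together, the ufunc `__dict__`/`__doc__` hunks earlier in this diff and the `@set_module("numpy.strings")` decorators above make ufunc metadata writable and consistently labeled. A minimal sketch of the resulting Python-level behavior, assuming a NumPy build that includes these changes:

```python
import numpy as np

# Ufuncs now carry an instance __dict__, so __doc__ (and __module__,
# which _override___module__ relies on) can simply be assigned:
np.add.__doc__ = "Custom documentation for add."
assert np.add.__doc__ == "Custom documentation for add."

# Deleting the override falls back to the generated docstring; this
# assignment style is also the replacement the _add_newdoc_ufunc
# deprecation message points to.
del np.add.__doc__
assert "add(x1, x2" in np.add.__doc__

# The @set_module decorators relabel the pure-Python wrappers:
assert np.strings.find.__module__ == "numpy.strings"
```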
@overload def not_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... @overload def not_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def not_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... @overload def greater_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... @overload def greater_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def greater_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... @overload def less_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... @overload def less_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def less_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... @overload def greater(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... @overload def greater(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def greater(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... @overload def less(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... @overload def less(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def less(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... @overload def add(x1: U_co, x2: U_co) -> NDArray[np.str_]: ... @overload def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ... +@overload +def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ... @overload def multiply(a: S_co, i: i_co) -> NDArray[np.bytes_]: ... +@overload +def multiply(a: _StringDTypeSupportsArray, i: i_co) -> _StringDTypeArray: ... +@overload +def multiply(a: T_co, i: i_co) -> _StringDTypeOrUnicodeArray: ... @overload def mod(a: U_co, value: Any) -> NDArray[np.str_]: ... @overload def mod(a: S_co, value: Any) -> NDArray[np.bytes_]: ... +@overload +def mod(a: _StringDTypeSupportsArray, value: Any) -> _StringDTypeArray: ... +@overload +def mod(a: T_co, value: Any) -> _StringDTypeOrUnicodeArray: ... -def isalpha(x: U_co | S_co) -> NDArray[np.bool]: ... -def isalnum(a: U_co | S_co) -> NDArray[np.bool]: ... -def isdigit(x: U_co | S_co) -> NDArray[np.bool]: ... -def isspace(x: U_co | S_co) -> NDArray[np.bool]: ... -def isdecimal(x: U_co) -> NDArray[np.bool]: ... -def isnumeric(x: U_co) -> NDArray[np.bool]: ... -def islower(a: U_co | S_co) -> NDArray[np.bool]: ... -def istitle(a: U_co | S_co) -> NDArray[np.bool]: ... -def isupper(a: U_co | S_co) -> NDArray[np.bool]: ... +def isalpha(x: UST_co) -> NDArray[np.bool]: ... +def isalnum(a: UST_co) -> NDArray[np.bool]: ... +def isdigit(x: UST_co) -> NDArray[np.bool]: ... +def isspace(x: UST_co) -> NDArray[np.bool]: ... +def isdecimal(x: U_co | T_co) -> NDArray[np.bool]: ... +def isnumeric(x: U_co | T_co) -> NDArray[np.bool]: ... +def islower(a: UST_co) -> NDArray[np.bool]: ... +def istitle(a: UST_co) -> NDArray[np.bool]: ... +def isupper(a: UST_co) -> NDArray[np.bool]: ... -def str_len(x: U_co | S_co) -> NDArray[np.int_]: ... +def str_len(x: UST_co) -> NDArray[np.int_]: ... @overload def find( @@ -80,6 +112,13 @@ def find( start: i_co = ..., end: i_co | None = ..., ) -> NDArray[np.int_]: ... +@overload +def find( + a: T_co, + sub: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... @overload def rfind( @@ -95,6 +134,13 @@ def rfind( start: i_co = ..., end: i_co | None = ..., ) -> NDArray[np.int_]: ... +@overload +def rfind( + a: T_co, + sub: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ...
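A short usage sketch matching the new `T_co` overloads above; the values are illustrative and assume a build where `numpy.strings` accepts `StringDType` input:

```python
import numpy as np
from numpy.dtypes import StringDType

# Variable-width string arrays flow through the search ufuncs directly.
a = np.array(["hello", "world"], dtype=StringDType())
print(np.strings.find(a, "o"))   # [4 1]
print(np.strings.rfind(a, "l"))  # [3 3]
print(np.strings.str_len(a))     # [5 5]
```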
@overload def index( @@ -110,6 +156,13 @@ def index( start: i_co = ..., end: None | i_co = ..., ) -> NDArray[np.int_]: ... +@overload +def index( + a: T_co, + sub: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... @overload def rindex( @@ -125,6 +178,13 @@ def rindex( start: i_co = ..., end: None | i_co = ..., ) -> NDArray[np.int_]: ... +@overload +def rindex( + a: T_co, + sub: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... @overload def count( @@ -140,6 +200,13 @@ def count( start: i_co = ..., end: i_co | None = ..., ) -> NDArray[np.int_]: ... +@overload +def count( + a: T_co, + sub: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... @overload def startswith( @@ -155,6 +222,13 @@ def startswith( start: i_co = ..., end: i_co | None = ..., ) -> NDArray[np.bool]: ... +@overload +def startswith( + a: T_co, + prefix: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.bool]: ... @overload def endswith( @@ -170,15 +244,21 @@ def endswith( start: i_co = ..., end: i_co | None = ..., ) -> NDArray[np.bool]: ... +@overload +def endswith( + a: T_co, + suffix: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.bool]: ... def decode( a: S_co, encoding: None | str = ..., errors: None | str = ..., ) -> NDArray[np.str_]: ... - def encode( - a: U_co, + a: U_co | T_co, encoding: None | str = ..., errors: None | str = ..., ) -> NDArray[np.bytes_]: ... @@ -187,16 +267,28 @@ def encode( def expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[np.str_]: ... @overload def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[np.bytes_]: ... +@overload +def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = ...) -> _StringDTypeArray: ... +@overload +def expandtabs(a: T_co, tabsize: i_co = ...) -> _StringDTypeOrUnicodeArray: ... @overload def center(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[np.str_]: ... @overload def center(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[np.bytes_]: ... +@overload +def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +@overload +def center(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ... @overload def ljust(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[np.str_]: ... @overload def ljust(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[np.bytes_]: ... +@overload +def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +@overload +def ljust(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ... @overload def rjust( @@ -210,51 +302,99 @@ def rjust( width: i_co, fillchar: S_co = ..., ) -> NDArray[np.bytes_]: ... +@overload +def rjust( + a: _StringDTypeSupportsArray, + width: i_co, + fillchar: _StringDTypeSupportsArray = ..., +) -> _StringDTypeArray: ... +@overload +def rjust( + a: T_co, + width: i_co, + fillchar: T_co = ..., +) -> _StringDTypeOrUnicodeArray: ... @overload def lstrip(a: U_co, chars: None | U_co = ...) -> NDArray[np.str_]: ... @overload def lstrip(a: S_co, chars: None | S_co = ...) -> NDArray[np.bytes_]: ... +@overload +def lstrip(a: _StringDTypeSupportsArray, chars: None | _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +@overload +def lstrip(a: T_co, chars: None | T_co = ...) -> _StringDTypeOrUnicodeArray: ... @overload def rstrip(a: U_co, char: None | U_co = ...) -> NDArray[np.str_]: ...
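Per the `_StringDTypeSupportsArray` overloads above, the padding helpers return another variable-width string array when given one; a small illustrative check (values assume Python-compatible `str.center` semantics, which `numpy.strings` follows):

```python
import numpy as np
from numpy.dtypes import StringDType

a = np.array(["a", "bbb"], dtype=StringDType())
out = np.strings.center(a, 5, "*")
print(out)  # ['**a**' '*bbb*']
assert isinstance(out.dtype, StringDType)  # dtype is preserved
```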
@overload def rstrip(a: S_co, char: None | S_co = ...) -> NDArray[np.bytes_]: ... +@overload +def rstrip(a: _StringDTypeSupportsArray, chars: None | _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +@overload +def rstrip(a: T_co, chars: None | T_co = ...) -> _StringDTypeOrUnicodeArray: ... @overload def strip(a: U_co, chars: None | U_co = ...) -> NDArray[np.str_]: ... @overload def strip(a: S_co, chars: None | S_co = ...) -> NDArray[np.bytes_]: ... +@overload +def strip(a: _StringDTypeSupportsArray, chars: None | _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +@overload +def strip(a: T_co, chars: None | T_co = ...) -> _StringDTypeOrUnicodeArray: ... @overload def zfill(a: U_co, width: i_co) -> NDArray[np.str_]: ... @overload def zfill(a: S_co, width: i_co) -> NDArray[np.bytes_]: ... +@overload +def zfill(a: _StringDTypeSupportsArray, width: i_co) -> _StringDTypeArray: ... +@overload +def zfill(a: T_co, width: i_co) -> _StringDTypeOrUnicodeArray: ... @overload def upper(a: U_co) -> NDArray[np.str_]: ... @overload def upper(a: S_co) -> NDArray[np.bytes_]: ... +@overload +def upper(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def upper(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def lower(a: U_co) -> NDArray[np.str_]: ... @overload def lower(a: S_co) -> NDArray[np.bytes_]: ... +@overload +def lower(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def lower(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def swapcase(a: U_co) -> NDArray[np.str_]: ... @overload def swapcase(a: S_co) -> NDArray[np.bytes_]: ... +@overload +def swapcase(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def swapcase(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def capitalize(a: U_co) -> NDArray[np.str_]: ... @overload def capitalize(a: S_co) -> NDArray[np.bytes_]: ... +@overload +def capitalize(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def capitalize(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def title(a: U_co) -> NDArray[np.str_]: ... @overload def title(a: S_co) -> NDArray[np.bytes_]: ... +@overload +def title(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def title(a: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def replace( @@ -270,62 +410,69 @@ def replace( new: S_co, count: i_co = ..., ) -> NDArray[np.bytes_]: ... - -@overload -def join(sep: U_co, seq: U_co) -> NDArray[np.str_]: ... -@overload -def join(sep: S_co, seq: S_co) -> NDArray[np.bytes_]: ... - @overload -def split( - a: U_co, - sep: None | U_co = ..., - maxsplit: None | i_co = ..., -) -> NDArray[np.object_]: ... +def replace( + a: _StringDTypeSupportsArray, + old: _StringDTypeSupportsArray, + new: _StringDTypeSupportsArray, + count: i_co = ..., +) -> _StringDTypeArray: ... @overload -def split( - a: S_co, - sep: None | S_co = ..., - maxsplit: None | i_co = ..., -) -> NDArray[np.object_]: ... +def replace( + a: T_co, + old: T_co, + new: T_co, + count: i_co = ..., +) -> _StringDTypeOrUnicodeArray: ... @overload -def rsplit( - a: U_co, - sep: None | U_co = ..., - maxsplit: None | i_co = ..., -) -> NDArray[np.object_]: ... +def join(sep: U_co, seq: U_co) -> NDArray[np.str_]: ... @overload -def rsplit( - a: S_co, - sep: None | S_co = ..., - maxsplit: None | i_co = ..., -) -> NDArray[np.object_]: ... - +def join(sep: S_co, seq: S_co) -> NDArray[np.bytes_]: ... @overload -def splitlines(a: U_co, keepends: None | b_co = ...) -> NDArray[np.object_]: ... 
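The runtime `partition` rework earlier in this diff promotes `a` and `sep` together via `np.result_type`, which is what these stub changes describe. A sketch of the call pattern that now type-checks and runs:

```python
import numpy as np
from numpy.dtypes import StringDType

# A plain str separator promotes against the StringDType array.
a = np.array(["key=value", "flag"], dtype=StringDType())
head, sep, tail = np.strings.partition(a, "=")
print(head)  # ['key' 'flag']
print(sep)   # ['=' '']
print(tail)  # ['value' '']
```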
+def join(sep: _StringDTypeSupportsArray, seq: _StringDTypeSupportsArray) -> _StringDTypeArray: ... @overload -def splitlines(a: S_co, keepends: None | b_co = ...) -> NDArray[np.object_]: ... +def join(sep: T_co, seq: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def partition(a: U_co, sep: U_co) -> NDArray[np.str_]: ... @overload def partition(a: S_co, sep: S_co) -> NDArray[np.bytes_]: ... +@overload +def partition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def partition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def rpartition(a: U_co, sep: U_co) -> NDArray[np.str_]: ... @overload def rpartition(a: S_co, sep: S_co) -> NDArray[np.bytes_]: ... +@overload +def rpartition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def rpartition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def translate( a: U_co, - table: U_co, - deletechars: None | U_co = ..., + table: str, + deletechars: None | str = ..., ) -> NDArray[np.str_]: ... @overload def translate( a: S_co, - table: S_co, - deletechars: None | S_co = ..., + table: str, + deletechars: None | str = ..., ) -> NDArray[np.bytes_]: ... +@overload +def translate( + a: _StringDTypeSupportsArray, + table: str, + deletechars: None | str = ..., +) -> _StringDTypeArray: ... +@overload +def translate( + a: T_co, + table: str, + deletechars: None | str = ..., +) -> _StringDTypeOrUnicodeArray: ... diff --git a/numpy/_core/tests/_locales.py b/numpy/_core/tests/_locales.py index b1dc55a9b2dc..2244e0abda71 100644 --- a/numpy/_core/tests/_locales.py +++ b/numpy/_core/tests/_locales.py @@ -52,8 +52,6 @@ class CommaDecimalPointLocale: to the initial locale. It also serves as context manager with the same effect. If no such locale is available, the test is skipped. - .. versionadded:: 1.15.0 - """ (cur_locale, tst_locale) = find_comma_decimal_point_locale() diff --git a/numpy/_core/tests/examples/cython/checks.pyx b/numpy/_core/tests/examples/cython/checks.pyx index b51ab128053f..34359fb42fcb 100644 --- a/numpy/_core/tests/examples/cython/checks.pyx +++ b/numpy/_core/tests/examples/cython/checks.pyx @@ -129,6 +129,10 @@ def get_default_integer(): return cnp.dtype("intp") return None +def get_ravel_axis(): + return cnp.NPY_RAVEL_AXIS + + def conv_intp(cnp.intp_t val): return val @@ -262,3 +266,9 @@ def inc2_cfloat_struct(cnp.ndarray[cnp.cfloat_t] arr): # This works in both modes arr[1].real = arr[1].real + 1 arr[1].imag = arr[1].imag + 1 + + +def check_npy_uintp_type_enum(): + # Regression test for gh-27890: cnp.NPY_UINTP was not defined. + # Cython would fail to compile this before gh-27890 was fixed. 
+ return cnp.NPY_UINTP > 0 diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index 1ac7a49b3610..0a3edcce2bc4 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -302,8 +302,8 @@ def test_object_array_astype_to_void(): assert arr.dtype == "V8" @pytest.mark.parametrize("t", - np._core.sctypes['uint'] + - np._core.sctypes['int'] + + np._core.sctypes['uint'] + + np._core.sctypes['int'] + np._core.sctypes['float'] ) def test_array_astype_warning(t): diff --git a/numpy/_core/tests/test_array_api_info.py b/numpy/_core/tests/test_array_api_info.py index 154b3837325d..cccf5d346c8b 100644 --- a/numpy/_core/tests/test_array_api_info.py +++ b/numpy/_core/tests/test_array_api_info.py @@ -6,8 +6,8 @@ def test_capabilities(): caps = info.capabilities() - assert caps["boolean indexing"] == True - assert caps["data-dependent shapes"] == True + assert caps["boolean indexing"] is True + assert caps["data-dependent shapes"] is True # This will be added in the 2024.12 release of the array API standard. diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py index ee7b7c8d6685..c7ceb92650c9 100644 --- a/numpy/_core/tests/test_array_coercion.py +++ b/numpy/_core/tests/test_array_coercion.py @@ -762,6 +762,17 @@ def __getitem__(self): with pytest.raises(error): np.array(BadSequence()) + def test_array_interface_descr_optional(self): + # The descr should be optional; regression test for gh-27249 + arr = np.ones(10, dtype="V10") + iface = arr.__array_interface__ + iface.pop("descr") + + class MyClass: + __array_interface__ = iface + + assert_array_equal(np.asarray(MyClass), arr) + class TestAsArray: """Test expected behaviors of ``asarray``.""" diff --git a/numpy/_core/tests/test_arraymethod.py b/numpy/_core/tests/test_arraymethod.py index f10d9b984987..6083381af858 100644 --- a/numpy/_core/tests/test_arraymethod.py +++ b/numpy/_core/tests/test_arraymethod.py @@ -5,7 +5,6 @@ from __future__ import annotations -import sys import types from typing import Any diff --git a/numpy/_core/tests/test_arrayobject.py b/numpy/_core/tests/test_arrayobject.py index ccab929b2a09..ffa1ba001776 100644 --- a/numpy/_core/tests/test_arrayobject.py +++ b/numpy/_core/tests/test_arrayobject.py @@ -31,3 +31,45 @@ def test_matrix_transpose_equals_swapaxes(shape): tgt = np.swapaxes(arr, num_of_axes - 2, num_of_axes - 1) mT = arr.mT assert_array_equal(tgt, mT) + + +class MyArr(np.ndarray): + def __array_wrap__(self, arr, context=None, return_scalar=None): + return super().__array_wrap__(arr, context, return_scalar) + + +class MyArrNoWrap(np.ndarray): + pass + + +@pytest.mark.parametrize("subclass_self", [np.ndarray, MyArr, MyArrNoWrap]) +@pytest.mark.parametrize("subclass_arr", [np.ndarray, MyArr, MyArrNoWrap]) +def test_array_wrap(subclass_self, subclass_arr): + # NumPy should allow `__array_wrap__` to be called on arrays; its logic + # is designed in a way that: + # + # * Subclasses never return scalars by default (to preserve their + # information). They can choose to if they wish. + # * NumPy returns scalars, if `return_scalar` is passed as True to allow + # manual calls to `arr.__array_wrap__` to do the right thing. + # * The type of the input should be ignored (it should be a base-class + # array, but I am not sure this is guaranteed). + + arr = np.arange(3).view(subclass_self) + + arr0d = np.array(3, dtype=np.int8).view(subclass_arr) + # With third argument True, ndarray allows "decay" to scalar.
+ # (I don't think NumPy would pass `None`, but it seems clear to support) + if subclass_self is np.ndarray: + assert type(arr.__array_wrap__(arr0d, None, True)) is np.int8 + else: + assert type(arr.__array_wrap__(arr0d, None, True)) is type(arr) + + # Otherwise, result should be viewed as the subclass + assert type(arr.__array_wrap__(arr0d)) is type(arr) + assert type(arr.__array_wrap__(arr0d, None, None)) is type(arr) + assert type(arr.__array_wrap__(arr0d, None, False)) is type(arr) + + # Non 0-D array can't be converted to scalar, so we ignore that + arr1d = np.array([3], dtype=np.int8).view(subclass_arr) + assert type(arr.__array_wrap__(arr1d, None, True)) is type(arr) diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index 5b0642cbb0bd..aebfd6d087ab 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -19,7 +19,8 @@ def test_nan_inf(self): assert_equal(repr(x), 'array([nan, inf])') def test_subclass(self): - class sub(np.ndarray): pass + class sub(np.ndarray): + pass # one dimensional x1d = np.array([1, 2]).view(sub) @@ -143,7 +144,7 @@ def test_self_containing(self): first[()] = 0 # resolve circular references for garbage collector def test_containing_list(self): - # printing square brackets directly would be ambiguuous + # printing square brackets directly would be ambiguous arr1d = np.array([None, None]) arr1d[0] = [1, 2] arr1d[1] = [3] @@ -345,7 +346,13 @@ def test_summarize_1d(self): assert_equal(str(A), strA) reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])' - assert_equal(repr(A), reprA) + try: + np.set_printoptions(legacy='2.1') + assert_equal(repr(A), reprA) + finally: + np.set_printoptions(legacy=False) + + assert_equal(repr(A), reprA.replace(')', ', shape=(1001,))')) def test_summarize_2d(self): A = np.arange(1002).reshape(2, 501) @@ -355,6 +362,23 @@ def test_summarize_2d(self): reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \ ' [ 501, 502, 503, ..., 999, 1000, 1001]])' + try: + np.set_printoptions(legacy='2.1') + assert_equal(repr(A), reprA) + finally: + np.set_printoptions(legacy=False) + + assert_equal(repr(A), reprA.replace(')', ', shape=(2, 501))')) + + def test_summarize_2d_dtype(self): + A = np.arange(1002, dtype='i2').reshape(2, 501) + strA = '[[ 0 1 2 ... 498 499 500]\n' \ + ' [ 501 502 503 ... 999 1000 1001]]' + assert_equal(str(A), strA) + + reprA = ('array([[ 0, 1, 2, ..., 498, 499, 500],\n' + ' [ 501, 502, 503, ..., 999, 1000, 1001]],\n' + ' shape=(2, 501), dtype=int16)') assert_equal(repr(A), reprA) def test_summarize_structure(self): @@ -627,8 +651,9 @@ def teardown_method(self): def test_basic(self): x = np.array([1.5, 0, 1.234567890]) assert_equal(repr(x), "array([1.5 , 0. , 1.23456789])") - np.set_printoptions(precision=4) + ret = np.set_printoptions(precision=4) assert_equal(repr(x), "array([1.5 , 0. 
, 1.2346])") + assert ret is None def test_precision_zero(self): np.set_printoptions(precision=0) @@ -1038,7 +1063,7 @@ def test_edgeitems(self): [[18, ..., 20], ..., - [24, ..., 26]]])""") + [24, ..., 26]]], shape=(3, 3, 3))""") ) b = np.zeros((3, 3, 1, 1)) @@ -1059,40 +1084,37 @@ def test_edgeitems(self): ..., - [[0.]]]])""") + [[0.]]]], shape=(3, 3, 1, 1))""") ) # 1.13 had extra trailing spaces, and was missing newlines - np.set_printoptions(legacy='1.13') - - assert_equal( - repr(a), - textwrap.dedent("""\ - array([[[ 0, ..., 2], - ..., - [ 6, ..., 8]], - - ..., - [[18, ..., 20], - ..., - [24, ..., 26]]])""") - ) - - assert_equal( - repr(b), - textwrap.dedent("""\ - array([[[[ 0.]], - - ..., - [[ 0.]]], - - - ..., - [[[ 0.]], - - ..., - [[ 0.]]]])""") - ) + try: + np.set_printoptions(legacy='1.13') + assert_equal(repr(a), ( + "array([[[ 0, ..., 2],\n" + " ..., \n" + " [ 6, ..., 8]],\n" + "\n" + " ..., \n" + " [[18, ..., 20],\n" + " ..., \n" + " [24, ..., 26]]])") + ) + assert_equal(repr(b), ( + "array([[[[ 0.]],\n" + "\n" + " ..., \n" + " [[ 0.]]],\n" + "\n" + "\n" + " ..., \n" + " [[[ 0.]],\n" + "\n" + " ..., \n" + " [[ 0.]]]])") + ) + finally: + np.set_printoptions(legacy=False) def test_edgeitems_structured(self): np.set_printoptions(edgeitems=1, threshold=1) @@ -1126,7 +1148,7 @@ def test_ctx_mgr(self): assert_equal(s, '[0.67]') def test_ctx_mgr_restores(self): - # test that print options are actually restrored + # test that print options are actually restored opts = np.get_printoptions() with np.printoptions(precision=opts['precision'] - 1, linewidth=opts['linewidth'] - 4): diff --git a/numpy/_core/tests/test_casting_unittests.py b/numpy/_core/tests/test_casting_unittests.py index 087d12a0af53..50b4f45b1f5a 100644 --- a/numpy/_core/tests/test_casting_unittests.py +++ b/numpy/_core/tests/test_casting_unittests.py @@ -776,7 +776,7 @@ def test_structured_field_offsets(self, to_dt, expected_off): # completely invalid/impossible cast: ("i,i", "i,i,i", None), ]) - def test_structured_view_offsets_paramteric( + def test_structured_view_offsets_parametric( self, from_dt, to_dt, expected_off): # TODO: While this test is fairly thorough, right now, it does not # really test some paths that may have nonzero offsets (they don't diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py index 35d81005cfc1..956f9630a0c5 100644 --- a/numpy/_core/tests/test_cpu_features.py +++ b/numpy/_core/tests/test_cpu_features.py @@ -1,14 +1,16 @@ -import sys, platform, re, pytest +import os +import re +import sys +import pathlib +import platform +import subprocess +import pytest from numpy._core._multiarray_umath import ( __cpu_features__, __cpu_baseline__, __cpu_dispatch__, ) import numpy as np -import subprocess -import pathlib -import os -import re def assert_features_equal(actual, desired, fname): __tracebackhide__ = True # Hide traceback for py.test @@ -136,7 +138,7 @@ class TestEnvPrivation: SCRIPT = """ def main(): from numpy._core._multiarray_umath import ( - __cpu_features__, + __cpu_features__, __cpu_dispatch__ ) diff --git a/numpy/_core/tests/test_custom_dtypes.py b/numpy/_core/tests/test_custom_dtypes.py index e8acb450516b..6120bb36b320 100644 --- a/numpy/_core/tests/test_custom_dtypes.py +++ b/numpy/_core/tests/test_custom_dtypes.py @@ -1,4 +1,3 @@ -import sys from tempfile import NamedTemporaryFile import pytest @@ -117,7 +116,7 @@ def test_possible_and_impossible_reduce(self): # For reductions to work, the first and last operand must have the # same dtype. 
For this parametric DType that is not necessarily true. a = self._get_array(2.) - # Addition reductin works (as of writing requires to pass initial + # Addition reduction works (as of writing requires to pass initial # because setting a scaled-float from the default `0` fails). res = np.add.reduce(a, initial=0.) assert res == a.astype(np.float64).sum() diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index 26a1fafa0066..d7fe28a8f053 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -1,9 +1,7 @@ from datetime import datetime import os -import shutil import subprocess import sys -import time import pytest import numpy as np @@ -48,7 +46,8 @@ def install_temp(tmpdir_factory): native_file = str(build_dir / 'interpreter-native-file.ini') with open(native_file, 'w') as f: f.write("[binaries]\n") - f.write(f"python = '{sys.executable}'") + f.write(f"python = '{sys.executable}'\n") + f.write(f"python3 = '{sys.executable}'") try: subprocess.check_call(["meson", "--version"]) @@ -72,7 +71,7 @@ def install_temp(tmpdir_factory): print("----------------") print("meson build failed when doing") print(f"'meson setup --native-file {native_file} {srcdir}'") - print(f"'meson compile -vv'") + print("'meson compile -vv'") print(f"in {build_dir}") print("----------------") raise @@ -153,6 +152,13 @@ def test_default_int(install_temp): assert checks.get_default_integer() is np.dtype(int) + +def test_ravel_axis(install_temp): + import checks + + assert checks.get_ravel_axis() == np.iinfo("intc").min + + def test_convert_datetime64_to_datetimestruct(install_temp): # GH#21199 import checks @@ -210,10 +216,8 @@ def test_multiiter_fields(install_temp, arrays): assert bcast.shape == checks.get_multiiter_shape(bcast) assert bcast.index == checks.get_multiiter_current_index(bcast) assert all( - [ - x.base is y.base - for x, y in zip(bcast.iters, checks.get_multiiter_iters(bcast)) - ] + x.base is y.base + for x, y in zip(bcast.iters, checks.get_multiiter_iters(bcast)) ) @@ -273,10 +277,8 @@ def test_npyiter_api(install_temp): x is y for x, y in zip(checks.get_npyiter_operands(it), it.operands) ) assert all( - [ - np.allclose(x, y) - for x, y in zip(checks.get_npyiter_itviews(it), it.itviews) - ] + np.allclose(x, y) + for x, y in zip(checks.get_npyiter_itviews(it), it.itviews) ) @@ -289,7 +291,13 @@ def test_fillwithbytes(install_temp): def test_complex(install_temp): from checks import inc2_cfloat_struct - + arr = np.array([0, 10+10j], dtype="F") inc2_cfloat_struct(arr) assert arr[1] == (12 + 12j) + + +def test_npy_uintp_type_enum(): + import checks + assert checks.check_npy_uintp_type_enum() + diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index 70a294796a0d..17b25a75716e 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -24,6 +24,12 @@ RecursionError = RuntimeError # python < 3.5 +def _assert_equal_hash(v1, v2): + assert v1 == v2 + assert hash(v1) == hash(v2) + assert v2 in {v1} + + class TestDateTime: def test_string(self): @@ -1390,6 +1396,14 @@ def test_timedelta_divmod(self, op1, op2): expected = (op1 // op2, op1 % op2) assert_equal(divmod(op1, op2), expected) + @pytest.mark.parametrize("op1, op2", [ + # Y and M are incompatible with all units except Y and M + (np.timedelta64(1, 'Y'), np.timedelta64(1, 's')), + (np.timedelta64(1, 'D'), np.timedelta64(1, 'M')), + ]) + def test_timedelta_divmod_typeerror(self, op1, op2): + assert_raises(TypeError, np.divmod, op1, op2) 
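The divmod resolver change and the new `test_timedelta_divmod_typeerror` above pin down both sides of the behavior; roughly:

```python
import numpy as np

# Matching units: integral quotient, remainder keeps the unit.
q, r = np.divmod(np.timedelta64(7, "h"), np.timedelta64(2, "h"))
print(q, r)  # 3 1 hours

# Y/M units cannot be promoted with day-or-finer units, so this raises.
try:
    np.divmod(np.timedelta64(1, "Y"), np.timedelta64(1, "s"))
except TypeError as exc:
    print("rejected:", exc)
```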
+ @pytest.mark.skipif(IS_WASM, reason="does not work in wasm") @pytest.mark.parametrize("op1, op2", [ # reuse cases from floordiv @@ -2552,6 +2566,101 @@ def test_limit_str_roundtrip(self, time_unit, sign): limit_via_str = np.datetime64(str(limit), time_unit) assert limit_via_str == limit + def test_datetime_hash_nat(self): + nat1 = np.datetime64() + nat2 = np.datetime64() + assert nat1 is not nat2 + assert nat1 != nat2 + assert hash(nat1) != hash(nat2) + + @pytest.mark.parametrize('unit', ('Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_datetime_hash_weeks(self, unit): + dt = np.datetime64(2348, 'W') # 2015-01-01 + dt2 = np.datetime64(dt, unit) + _assert_equal_hash(dt, dt2) + + dt3 = np.datetime64(int(dt2.astype(int)) + 1, unit) + assert hash(dt) != hash(dt3) # doesn't collide + + @pytest.mark.parametrize('unit', ('h', 'm', 's', 'ms', 'us')) + def test_datetime_hash_weeks_vs_pydatetime(self, unit): + dt = np.datetime64(2348, 'W') # 2015-01-01 + dt2 = np.datetime64(dt, unit) + pydt = dt2.astype(datetime.datetime) + assert isinstance(pydt, datetime.datetime) + _assert_equal_hash(pydt, dt2) + + @pytest.mark.parametrize('unit', ('Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_datetime_hash_big_negative(self, unit): + dt = np.datetime64(-102894, 'W') # -002-01-01 + dt2 = np.datetime64(dt, unit) + _assert_equal_hash(dt, dt2) + + # can only go down to "fs" before integer overflow + @pytest.mark.parametrize('unit', ('m', 's', 'ms', 'us', 'ns', 'ps', 'fs')) + def test_datetime_hash_minutes(self, unit): + dt = np.datetime64(3, 'm') + dt2 = np.datetime64(dt, unit) + _assert_equal_hash(dt, dt2) + + @pytest.mark.parametrize('unit', ('ns', 'ps', 'fs', 'as')) + def test_datetime_hash_ns(self, unit): + dt = np.datetime64(3, 'ns') + dt2 = np.datetime64(dt, unit) + _assert_equal_hash(dt, dt2) + + dt3 = np.datetime64(int(dt2.astype(int)) + 1, unit) + assert hash(dt) != hash(dt3) # doesn't collide + + @pytest.mark.parametrize('wk', range(500000, 500010)) # 11552-09-04 + @pytest.mark.parametrize('unit', ('W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_datetime_hash_big_positive(self, wk, unit): + dt = np.datetime64(wk, 'W') + dt2 = np.datetime64(dt, unit) + _assert_equal_hash(dt, dt2) + + def test_timedelta_hash_generic(self): + assert_raises(ValueError, hash, np.timedelta64(123)) # generic + + @pytest.mark.parametrize('unit', ('Y', 'M')) + def test_timedelta_hash_year_month(self, unit): + td = np.timedelta64(45, 'Y') + td2 = np.timedelta64(td, unit) + _assert_equal_hash(td, td2) + + @pytest.mark.parametrize('unit', ('W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_timedelta_hash_weeks(self, unit): + td = np.timedelta64(10, 'W') + td2 = np.timedelta64(td, unit) + _assert_equal_hash(td, td2) + + td3 = np.timedelta64(int(td2.astype(int)) + 1, unit) + assert hash(td) != hash(td3) # doesn't collide + + @pytest.mark.parametrize('unit', ('W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_timedelta_hash_weeks_vs_pydelta(self, unit): + td = np.timedelta64(10, 'W') + td2 = np.timedelta64(td, unit) + pytd = td2.astype(datetime.timedelta) + assert isinstance(pytd, datetime.timedelta) + _assert_equal_hash(pytd, td2) + + @pytest.mark.parametrize('unit', ('ms', 'us', 'ns', 'ps', 'fs', 'as')) + def test_timedelta_hash_ms(self, unit): + td = np.timedelta64(3, 'ms') + td2 = np.timedelta64(td, unit) + _assert_equal_hash(td, td2) + + td3 = np.timedelta64(int(td2.astype(int)) + 1, unit) + assert hash(td) != hash(td3) # doesn't collide + + @pytest.mark.parametrize('wk', range(500000, 500010)) + 
@pytest.mark.parametrize('unit', ('W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_timedelta_hash_big_positive(self, wk, unit): + td = np.timedelta64(wk, 'W') + td2 = np.timedelta64(td, unit) + _assert_equal_hash(td, td2) + class TestDateTimeData: diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index 33431faef684..f0ac55fc5c6f 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -3,13 +3,10 @@ to document how deprecations should eventually be turned into errors. """ -import datetime -import operator import warnings import pytest import tempfile import re -import sys import numpy as np from numpy.testing import ( @@ -18,6 +15,7 @@ ) from numpy._core._multiarray_tests import fromstring_null_term_c_api +import numpy._core._struct_ufunc_tests as struct_ufunc try: import pytz @@ -199,19 +197,6 @@ def test_3_tuple(self): self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63))) -class TestTruthTestingEmptyArrays(_DeprecationTestCase): - # 2017-09-25, 1.14.0 - message = '.*truth value of an empty array is ambiguous.*' - - def test_1d(self): - self.assert_deprecated(bool, args=(np.array([]),)) - - def test_2d(self): - self.assert_deprecated(bool, args=(np.zeros((1, 0)),)) - self.assert_deprecated(bool, args=(np.zeros((0, 1)),)) - self.assert_deprecated(bool, args=(np.zeros((0, 0)),)) - - class TestBincount(_DeprecationTestCase): # 2017-06-01, 1.14.0 def test_bincount_minlength(self): @@ -501,7 +486,7 @@ class TestMachAr(_DeprecationTestCase): warning_cls = DeprecationWarning def test_deprecated_module(self): - self.assert_deprecated(lambda: getattr(np._core, "MachAr")) + self.assert_deprecated(lambda: np._core.MachAr) class TestQuantileInterpolationDeprecation(_DeprecationTestCase): @@ -730,7 +715,7 @@ def test_parenthesized_repeat_count(self, string): class TestDeprecatedSaveFixImports(_DeprecationTestCase): # Deprecated in Numpy 2.1, 2024-05 message = "The 'fix_imports' flag is deprecated and has no effect." - + def test_deprecated(self): with temppath(suffix='.npy') as path: sample_args = (path, np.array(np.zeros((1024, 10)))) @@ -748,3 +733,13 @@ def test_deprecated(self): self.assert_deprecated(np.save, args=sample_args, kwargs={'allow_pickle': allow_pickle, 'fix_imports': False}) + + +class TestAddNewdocUFunc(_DeprecationTestCase): + # Deprecated in Numpy 2.2, 2024-11 + def test_deprecated(self): + self.assert_deprecated( + lambda: np._core.umath._add_newdoc_ufunc( + struct_ufunc.add_triplet, "new docs" + ) + ) diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 869183956f78..deeca5171c2d 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -13,7 +13,7 @@ from numpy._core._multiarray_tests import create_custom_field_dtype from numpy.testing import ( assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT, - IS_PYSTON, _OLD_PROMOTION) + IS_PYSTON) from itertools import permutations import random @@ -1433,34 +1433,25 @@ class TestPromotion: """Test cases related to more complex DType promotions. 
Further promotion tests are defined in `test_numeric.py` """ - @np._no_nep50_warning() - @pytest.mark.parametrize(["other", "expected", "expected_weak"], - [(2**16-1, np.complex64, None), - (2**32-1, np.complex128, np.complex64), - (np.float16(2), np.complex64, None), - (np.float32(2), np.complex64, None), - (np.longdouble(2), np.complex64, np.clongdouble), + @pytest.mark.parametrize(["other", "expected"], + [(2**16-1, np.complex64), + (2**32-1, np.complex64), + (np.float16(2), np.complex64), + (np.float32(2), np.complex64), + (np.longdouble(2), np.clongdouble), # Base of the double value to sidestep any rounding issues: - (np.longdouble(np.nextafter(1.7e308, 0.)), - np.complex128, np.clongdouble), + (np.longdouble(np.nextafter(1.7e308, 0.)), np.clongdouble), # Additionally use "nextafter" so the cast can't round down: - (np.longdouble(np.nextafter(1.7e308, np.inf)), - np.clongdouble, None), + (np.longdouble(np.nextafter(1.7e308, np.inf)), np.clongdouble), # repeat for complex scalars: - (np.complex64(2), np.complex64, None), - (np.clongdouble(2), np.complex64, np.clongdouble), + (np.complex64(2), np.complex64), + (np.clongdouble(2), np.clongdouble), # Base of the double value to sidestep any rounding issues: - (np.clongdouble(np.nextafter(1.7e308, 0.) * 1j), - np.complex128, np.clongdouble), + (np.clongdouble(np.nextafter(1.7e308, 0.) * 1j), np.clongdouble), # Additionally use "nextafter" so the cast can't round down: - (np.clongdouble(np.nextafter(1.7e308, np.inf)), - np.clongdouble, None), + (np.clongdouble(np.nextafter(1.7e308, np.inf)), np.clongdouble), ]) - def test_complex_other_value_based(self, - weak_promotion, other, expected, expected_weak): - if weak_promotion and expected_weak is not None: - expected = expected_weak - + def test_complex_other_value_based(self, other, expected): # This would change if we modify the value based promotion min_complex = np.dtype(np.complex64) @@ -1511,22 +1502,11 @@ def test_python_integer_promotion(self, val): @pytest.mark.parametrize(["other", "expected"], [(1, rational), (1., np.float64)]) - @np._no_nep50_warning() - def test_float_int_pyscalar_promote_rational( - self, weak_promotion, other, expected): + def test_float_int_pyscalar_promote_rational(self, other, expected): # Note that rationals are a bit awkward as they promote with float64 # or default ints, but not float16 or uint8/int8 (which looks - # inconsistent here). The new promotion fixes this (partially?) - if not weak_promotion and type(other) == float: - # The float version, checks float16 in the legacy path, which fails - # the integer version seems to check int8 (also), so it can - # pass. - with pytest.raises(TypeError, - match=r".* do not have a common DType"): - np.result_type(other, rational) - else: - assert np.result_type(other, rational) == expected - + # inconsistent here). The new promotion fixed this (partially?) + assert np.result_type(other, rational) == expected assert np.result_type(other, rational(1, 2)) == expected @pytest.mark.parametrize(["dtypes", "expected"], [ @@ -1546,7 +1526,7 @@ def test_float_int_pyscalar_promote_rational( ]) def test_permutations_do_not_influence_result(self, dtypes, expected): # Tests that most permutations do not influence the result. In the - # above some uint and int combintations promote to a larger integer + # above some uint and int combinations promote to a larger integer # type, which would then promote to a larger than necessary float. 
for perm in permutations(dtypes): assert np.result_type(*perm) == expected diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index 0a97693f73b0..636c97f03e87 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -1,6 +1,4 @@ import itertools -import sys -import platform import pytest @@ -308,7 +306,6 @@ def test_einsum_views(self): assert_(b.base is a) assert_equal(b, a.swapaxes(0, 1)) - @np._no_nep50_warning() def check_einsum_sums(self, dtype, do_opt=False): dtype = np.dtype(dtype) # Check various sums. Does many sizes to exercise unrolled loops. @@ -1028,7 +1025,7 @@ def test_broadcasting_dot_cases(self): def test_output_order(self): # Ensure output order is respected for optimize cases, the below - # conraction should yield a reshaped tensor view + # contraction should yield a reshaped tensor view # gh-16415 a = np.ones((2, 3, 5), order='F') diff --git a/numpy/_core/tests/test_errstate.py b/numpy/_core/tests/test_errstate.py index bd6b8b8caec3..628c9ddca411 100644 --- a/numpy/_core/tests/test_errstate.py +++ b/numpy/_core/tests/test_errstate.py @@ -68,7 +68,7 @@ def test_errstate_decorator(self): def foo(): a = -np.arange(3) a // 0 - + foo() def test_errstate_enter_once(self): diff --git a/numpy/_core/tests/test_function_base.py b/numpy/_core/tests/test_function_base.py index 333943212646..4f735b7ce359 100644 --- a/numpy/_core/tests/test_function_base.py +++ b/numpy/_core/tests/test_function_base.py @@ -448,7 +448,7 @@ def test_object(self): stop = array(2, dtype='O') y = linspace(start, stop, 3) assert_array_equal(y, array([1., 1.5, 2.])) - + def test_round_negative(self): y = linspace(-1, 3, num=8, dtype=int) t = array([-1, -1, 0, 0, 1, 1, 2, 3], dtype=int) @@ -460,7 +460,7 @@ def test_any_step_zero_and_not_mult_inplace(self): stop = array([2.0, 1.0]) y = linspace(start, stop, 3) assert_array_equal(y, array([[0.0, 1.0], [1.0, 1.0], [2.0, 1.0]])) - + class TestAdd_newdoc: diff --git a/numpy/_core/tests/test_getlimits.py b/numpy/_core/tests/test_getlimits.py index 8378bad19391..3fe67a1f4037 100644 --- a/numpy/_core/tests/test_getlimits.py +++ b/numpy/_core/tests/test_getlimits.py @@ -1,6 +1,7 @@ """ Test functions for limits module. 
""" +import types import warnings import numpy as np import pytest @@ -77,10 +78,10 @@ def test_regression_gh23867(self): class NonHashableWithDtype: __hash__ = None dtype = np.dtype('float32') - + x = NonHashableWithDtype() assert np.finfo(x) == np.finfo(x.dtype) - + class TestIinfo: def test_basic(self): @@ -192,3 +193,11 @@ def test_plausible_finfo(): assert_(info.nmant > 1) assert_(info.minexp < -1) assert_(info.maxexp > 1) + + +class TestRuntimeSubscriptable: + def test_finfo_generic(self): + assert isinstance(np.finfo[np.float64], types.GenericAlias) + + def test_iinfo_generic(self): + assert isinstance(np.iinfo[np.int_], types.GenericAlias) diff --git a/numpy/_core/tests/test_half.py b/numpy/_core/tests/test_half.py index fbc1bf6a0a6d..0eced33b28f8 100644 --- a/numpy/_core/tests/test_half.py +++ b/numpy/_core/tests/test_half.py @@ -3,7 +3,7 @@ import numpy as np from numpy import uint16, float16, float32, float64 -from numpy.testing import assert_, assert_equal, _OLD_PROMOTION, IS_WASM +from numpy.testing import assert_, assert_equal, IS_WASM def assert_raises_fpe(strmatch, callable, *args, **kwargs): @@ -22,7 +22,7 @@ def setup_method(self): self.all_f16 = np.arange(0x10000, dtype=uint16) self.all_f16.dtype = float16 - # NaN value can cause an invalid FP exception if HW is been used + # NaN value can cause an invalid FP exception if HW is being used with np.errstate(invalid='ignore'): self.all_f32 = np.array(self.all_f16, dtype=float32) self.all_f64 = np.array(self.all_f16, dtype=float64) @@ -49,7 +49,7 @@ def test_half_conversions(self): # Convert from float32 back to float16 with np.errstate(invalid='ignore'): b = np.array(self.all_f32, dtype=float16) - # avoid testing NaNs due to differ bits wither Q/SNaNs + # avoid testing NaNs due to differing bit patterns in Q/S NaNs b_nn = b == b assert_equal(self.all_f16[b_nn].view(dtype=uint16), b[b_nn].view(dtype=uint16)) @@ -93,7 +93,6 @@ def test_half_conversion_from_string(self, string_dt): @pytest.mark.parametrize("offset", [None, "up", "down"]) @pytest.mark.parametrize("shift", [None, "up", "down"]) @pytest.mark.parametrize("float_t", [np.float32, np.float64]) - @np._no_nep50_warning() def test_half_conversion_rounding(self, float_t, shift, offset): # Assumes that round to even is used during casting. max_pattern = np.float16(np.finfo(np.float16).max).view(np.uint16) @@ -120,8 +119,8 @@ def test_half_conversion_rounding(self, float_t, shift, offset): # Convert back to float16 and its bit pattern: res_patterns = f16s_float.astype(np.float16).view(np.uint16) - # The above calculations tries the original values, or the exact - # mid points between the float16 values. It then further offsets them + # The above calculation tries the original values, or the exact + # midpoints between the float16 values. It then further offsets them # by as little as possible. If no offset occurs, "round to even" # logic will be necessary, an arbitrarily small offset should cause # normal up/down rounding always. 
@@ -460,8 +459,7 @@ def test_half_ufuncs(self): assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2])) assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12]) - @np._no_nep50_warning() - def test_half_coercion(self, weak_promotion): + def test_half_coercion(self): """Test that half gets coerced properly with the other types""" a16 = np.array((1,), dtype=float16) a32 = np.array((1,), dtype=float32) @@ -471,14 +469,12 @@ def test_half_coercion(self, weak_promotion): assert np.power(a16, 2).dtype == float16 assert np.power(a16, 2.0).dtype == float16 assert np.power(a16, b16).dtype == float16 - expected_dt = float32 if weak_promotion else float16 - assert np.power(a16, b32).dtype == expected_dt + assert np.power(a16, b32).dtype == float32 assert np.power(a16, a16).dtype == float16 assert np.power(a16, a32).dtype == float32 - expected_dt = float16 if weak_promotion else float64 - assert np.power(b16, 2).dtype == expected_dt - assert np.power(b16, 2.0).dtype == expected_dt + assert np.power(b16, 2).dtype == float16 + assert np.power(b16, 2.0).dtype == float16 assert np.power(b16, b16).dtype, float16 assert np.power(b16, b32).dtype, float32 assert np.power(b16, a16).dtype, float16 @@ -486,8 +482,7 @@ def test_half_coercion(self, weak_promotion): assert np.power(a32, a16).dtype == float32 assert np.power(a32, b16).dtype == float32 - expected_dt = float32 if weak_promotion else float16 - assert np.power(b32, a16).dtype == expected_dt + assert np.power(b32, a16).dtype == float32 assert np.power(b32, b16).dtype == float32 @pytest.mark.skipif(platform.machine() == "armv5tel", diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index 686caf9c7822..f393c401cd9b 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -367,7 +367,7 @@ def test_trivial_fancy_not_possible(self): assert_array_equal(a[idx], idx) # this case must not go into the fast path, note that idx is - # a non-contiuguous none 1D array here. + # a non-contiguous none 1D array here. a[idx] = -1 res = np.arange(6) res[0] = -1 @@ -409,15 +409,19 @@ def test_array_like_values(self): a[...] = memoryview(s) assert_array_equal(a, s) - def test_subclass_writeable(self): + @pytest.mark.parametrize("writeable", [True, False]) + def test_subclass_writeable(self, writeable): d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)], dtype=[('target', 'S20'), ('V_mag', '>f4')]) + d.flags.writeable = writeable + # Advanced indexing results are always writeable: ind = np.array([False, True, True], dtype=bool) - assert_(d[ind].flags.writeable) + assert d[ind].flags.writeable ind = np.array([0, 1]) - assert_(d[ind].flags.writeable) - assert_(d[...].flags.writeable) - assert_(d[0].flags.writeable) + assert d[ind].flags.writeable + # Views should be writeable if the original array is: + assert d[...].flags.writeable == writeable + assert d[0].flags.writeable == writeable def test_memory_order(self): # This is not necessary to preserve. 
Memory layouts for @@ -668,12 +672,12 @@ def test_simple_broadcasting_errors(self): ([0, 1], ..., 0), (..., [1, 2], [1, 2])]) def test_broadcast_error_reports_correct_shape(self, index): - values = np.zeros((100, 100)) # will never broadcast below + values = np.zeros((100, 100)) # will never broadcast below arr = np.zeros((3, 4, 5, 6, 7)) # We currently report without any spaces (could be changed) shape_str = str(arr[index].shape).replace(" ", "") - + with pytest.raises(ValueError) as e: arr[index] = values diff --git a/numpy/_core/tests/test_limited_api.py b/numpy/_core/tests/test_limited_api.py index 5a23b49171a0..d476456fb6e1 100644 --- a/numpy/_core/tests/test_limited_api.py +++ b/numpy/_core/tests/test_limited_api.py @@ -1,5 +1,4 @@ import os -import shutil import subprocess import sys import sysconfig @@ -41,6 +40,14 @@ def install_temp(tmpdir_factory): srcdir = os.path.join(os.path.dirname(__file__), 'examples', 'limited_api') build_dir = tmpdir_factory.mktemp("limited_api") / "build" os.makedirs(build_dir, exist_ok=True) + # Ensure we use the correct Python interpreter even when `meson` is + # installed in a different Python environment (see gh-24956) + native_file = str(build_dir / 'interpreter-native-file.ini') + with open(native_file, 'w') as f: + f.write("[binaries]\n") + f.write(f"python = '{sys.executable}'\n") + f.write(f"python3 = '{sys.executable}'") + try: subprocess.check_call(["meson", "--version"]) except FileNotFoundError: @@ -49,11 +56,13 @@ def install_temp(tmpdir_factory): subprocess.check_call(["meson", "setup", "--werror", "--buildtype=release", - "--vsenv", str(srcdir)], + "--vsenv", "--native-file", native_file, + str(srcdir)], cwd=build_dir, ) else: - subprocess.check_call(["meson", "setup", "--werror", str(srcdir)], + subprocess.check_call(["meson", "setup", "--werror", + "--native-file", native_file, str(srcdir)], cwd=build_dir ) try: diff --git a/numpy/_core/tests/test_mem_overlap.py b/numpy/_core/tests/test_mem_overlap.py index 4ea70c044d51..49a6b90da118 100644 --- a/numpy/_core/tests/test_mem_overlap.py +++ b/numpy/_core/tests/test_mem_overlap.py @@ -235,7 +235,7 @@ def iter_random_view_pairs(x, same_steps=True, equal_size=False): rng = np.random.RandomState(1234) if equal_size and same_steps: - raise ValueError() + raise ValueError def random_slice(n, step): start = rng.randint(0, n+1, dtype=np.intp) diff --git a/numpy/_core/tests/test_mem_policy.py b/numpy/_core/tests/test_mem_policy.py index 32459ab4d999..9846f89c404c 100644 --- a/numpy/_core/tests/test_mem_policy.py +++ b/numpy/_core/tests/test_mem_policy.py @@ -3,7 +3,6 @@ import os import sys import threading -import warnings import pytest diff --git a/numpy/_core/tests/test_memmap.py b/numpy/_core/tests/test_memmap.py index 9603e8316e1d..4ee8444432ad 100644 --- a/numpy/_core/tests/test_memmap.py +++ b/numpy/_core/tests/test_memmap.py @@ -199,6 +199,13 @@ def test_mmap_offset_greater_than_allocation_granularity(self): fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset) assert_(fp.offset == offset) + def test_empty_array_with_offset_multiple_of_allocation_granularity(self): + self.tmpfp.write(b'a'*mmap.ALLOCATIONGRANULARITY) + size = 0 + offset = mmap.ALLOCATIONGRANULARITY + fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset) + assert_equal(fp.offset, offset) + def test_no_shape(self): self.tmpfp.write(b'a'*16) mm = memmap(self.tmpfp, dtype='float64') @@ -207,13 +214,15 @@ def test_no_shape(self): def test_empty_array(self): # gh-12653 with pytest.raises(ValueError, match='empty 
file'): - memmap(self.tmpfp, shape=(0,4), mode='w+') + memmap(self.tmpfp, shape=(0, 4), mode='r') - self.tmpfp.write(b'\0') + # gh-27723 + # empty memmap works with mode in ('w+','r+') + memmap(self.tmpfp, shape=(0, 4), mode='w+') # ok now the file is not empty - memmap(self.tmpfp, shape=(0,4), mode='w+') - + memmap(self.tmpfp, shape=(0, 4), mode='w+') + def test_shape_type(self): memmap(self.tmpfp, shape=3, mode='w+') memmap(self.tmpfp, shape=self.shape, mode='w+') diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 441d76af9228..7ac22869495f 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -30,7 +30,7 @@ assert_array_equal, assert_raises_regex, assert_array_almost_equal, assert_allclose, IS_PYPY, IS_WASM, IS_PYSTON, HAS_REFCOUNT, assert_array_less, runstring, temppath, suppress_warnings, break_cycles, - _SUPPORTS_SVE, assert_array_compare, + check_support_sve, assert_array_compare, ) from numpy.testing._private.utils import requires_memory, _no_tracing from numpy._core.tests._locales import CommaDecimalPointLocale @@ -1097,14 +1097,14 @@ def __len__(self): return 1 def __getitem__(self, index): - raise ValueError() + raise ValueError class Map: def __len__(self): return 1 def __getitem__(self, index): - raise KeyError() + raise KeyError a = np.array([Map()]) assert_(a.shape == (1,)) @@ -1121,7 +1121,7 @@ def __getitem__(self, ind): if ind in [0, 1]: return ind else: - raise IndexError() + raise IndexError d = np.array([Point2(), Point2(), Point2()]) assert_equal(d.dtype, np.dtype(object)) @@ -4767,7 +4767,7 @@ class TestArgmax: ([np.nan, 0, 1, 2, 3], 0), ([np.nan, 0, np.nan, 2, 3], 0), # To hit the tail of SIMD multi-level(x4, x1) inner loops - # on variant SIMD widthes + # on variant SIMD widths ([1] * (2*5-1) + [np.nan], 2*5-1), ([1] * (4*5-1) + [np.nan], 4*5-1), ([1] * (8*5-1) + [np.nan], 8*5-1), @@ -4910,7 +4910,7 @@ class TestArgmin: ([np.nan, 0, 1, 2, 3], 0), ([np.nan, 0, np.nan, 2, 3], 0), # To hit the tail of SIMD multi-level(x4, x1) inner loops - # on variant SIMD widthes + # on variant SIMD widths ([1] * (2*5-1) + [np.nan], 2*5-1), ([1] * (4*5-1) + [np.nan], 4*5-1), ([1] * (8*5-1) + [np.nan], 8*5-1), @@ -5374,6 +5374,13 @@ def test_object(self): # gh-6312 u, v = np.array(u, dtype='object'), np.array(v, dtype='object') assert_array_equal(idx, np.lexsort((u, v))) + def test_strings(self): # gh-27984 + for dtype in "TU": + surnames = np.array(['Hertz', 'Galilei', 'Hertz'], dtype=dtype) + first_names = np.array(['Heinrich', 'Galileo', 'Gustav'], dtype=dtype) + assert_array_equal(np.lexsort((first_names, surnames)), [1, 2, 0]) + + def test_invalid_axis(self): # gh-7528 x = np.linspace(0., 1., 42*3).reshape(42, 3) assert_raises(AxisError, np.lexsort, x, axis=2) @@ -5478,7 +5485,7 @@ def test_roundtrip_str(self, x): def test_roundtrip_repr(self, x): x = x.real.ravel() - s = "@".join(map(lambda x: repr(x)[11:-1], x)) + s = "@".join((repr(x)[11:-1] for x in x)) y = np.fromstring(s, sep="@") assert_array_equal(x, y) @@ -8592,7 +8599,7 @@ def __array__(self, dtype=None, copy=None): def test__array__reference_leak(self): class NotAnArray: def __array__(self, dtype=None, copy=None): - raise NotImplementedError() + raise NotImplementedError x = NotAnArray() @@ -8900,7 +8907,8 @@ def test_to_bool_scalar(self): assert_equal(bool(np.array([False])), False) assert_equal(bool(np.array([True])), True) assert_equal(bool(np.array([[42]])), True) - assert_raises(ValueError, bool, np.array([1, 2])) + + def 
test_to_bool_scalar_not_convertible(self): class NotConvertible: def __bool__(self): @@ -8919,6 +8927,16 @@ def __bool__(self): assert_raises(Error, bool, self_containing) # previously stack overflow self_containing[0] = None # resolve circular reference + def test_to_bool_scalar_size_errors(self): + with pytest.raises(ValueError, match=".*one element is ambiguous"): + bool(np.array([1, 2])) + + with pytest.raises(ValueError, match=".*empty array is ambiguous"): + bool(np.empty((3, 0))) + + with pytest.raises(ValueError, match=".*empty array is ambiguous"): + bool(np.empty((0,))) + def test_to_int_scalar(self): # gh-9972 means that these aren't always the same int_funcs = (int, lambda x: x.__int__()) @@ -9174,6 +9192,12 @@ def test_resize(self): d.resize(150) assert_(old < sys.getsizeof(d)) + @pytest.mark.parametrize("dtype", ["u4,f4", "u4,O"]) + def test_resize_structured(self, dtype): + a = np.array([(0, 0.0) for i in range(5)], dtype=dtype) + a.resize(1000) + assert_array_equal(a, np.zeros(1000, dtype=dtype)) + def test_error(self): d = np.ones(100) assert_raises(TypeError, d.__sizeof__, "a") @@ -9708,7 +9732,8 @@ def __array_finalize__(self, obj): raise Exception(self) # a plain object can't be weakref'd - class Dummy: pass + class Dummy: + pass # get a weak reference to an object within an array obj_arr = np.array(Dummy()) @@ -9789,7 +9814,7 @@ class MyArr(np.ndarray): def __array_wrap__(self, new, context=None, return_scalar=False): type(self).called_wrap += 1 - return super().__array_wrap__(new) + return super().__array_wrap__(new, context, return_scalar) numpy_arr = np.zeros(5, dtype=dt1) my_arr = np.zeros(5, dtype=dt2).view(MyArr) @@ -9975,7 +10000,7 @@ def check(self, shape, dtype, order, align): elif order is None: assert_(x.flags.c_contiguous, err_msg) else: - raise ValueError() + raise ValueError def test_various_alignments(self): for align in [1, 2, 3, 4, 8, 12, 16, 32, 64, None]: @@ -10101,7 +10126,7 @@ def test_non_c_contiguous(self): assert_array_equal(x.view(' 2: i = nditer(aview.swapaxes(0, 1), [], [['readonly']]) - assert_equal([x for x in i], a) + assert_equal(list(i), a) def test_iter_c_order(): # Test forcing C order @@ -123,14 +123,14 @@ def test_iter_c_order(): aview = a.reshape(shape)[dirs_index] # C-order i = nditer(aview, order='C') - assert_equal([x for x in i], aview.ravel(order='C')) + assert_equal(list(i), aview.ravel(order='C')) # Fortran-order i = nditer(aview.T, order='C') - assert_equal([x for x in i], aview.T.ravel(order='C')) + assert_equal(list(i), aview.T.ravel(order='C')) # Other order if len(shape) > 2: i = nditer(aview.swapaxes(0, 1), order='C') - assert_equal([x for x in i], + assert_equal(list(i), aview.swapaxes(0, 1).ravel(order='C')) def test_iter_f_order(): @@ -150,14 +150,14 @@ def test_iter_f_order(): aview = a.reshape(shape)[dirs_index] # C-order i = nditer(aview, order='F') - assert_equal([x for x in i], aview.ravel(order='F')) + assert_equal(list(i), aview.ravel(order='F')) # Fortran-order i = nditer(aview.T, order='F') - assert_equal([x for x in i], aview.T.ravel(order='F')) + assert_equal(list(i), aview.T.ravel(order='F')) # Other order if len(shape) > 2: i = nditer(aview.swapaxes(0, 1), order='F') - assert_equal([x for x in i], + assert_equal(list(i), aview.swapaxes(0, 1).ravel(order='F')) def test_iter_c_or_f_order(): @@ -177,14 +177,14 @@ def test_iter_c_or_f_order(): aview = a.reshape(shape)[dirs_index] # C-order i = nditer(aview, order='A') - assert_equal([x for x in i], aview.ravel(order='A')) + assert_equal(list(i), 
aview.ravel(order='A')) # Fortran-order i = nditer(aview.T, order='A') - assert_equal([x for x in i], aview.T.ravel(order='A')) + assert_equal(list(i), aview.T.ravel(order='A')) # Other order if len(shape) > 2: i = nditer(aview.swapaxes(0, 1), order='A') - assert_equal([x for x in i], + assert_equal(list(i), aview.swapaxes(0, 1).ravel(order='A')) def test_nditer_multi_index_set(): @@ -195,7 +195,7 @@ def test_nditer_multi_index_set(): # Removes the iteration on two first elements of a[0] it.multi_index = (0, 2,) - assert_equal([i for i in it], [2, 3, 4, 5]) + assert_equal(list(it), [2, 3, 4, 5]) @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") def test_nditer_multi_index_set_refcount(): @@ -271,7 +271,7 @@ def test_iter_best_order_multi_index_3d(): assert_equal(iter_multi_index(i), [(0, 2, 0), (0, 2, 1), (0, 1, 0), (0, 1, 1), (0, 0, 0), (0, 0, 1), (1, 2, 0), (1, 2, 1), (1, 1, 0), (1, 1, 1), (1, 0, 0), (1, 0, 1)]) - i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['multi_index'], [['readonly']]) + i = nditer(a.reshape(2, 3, 2)[:, :, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 0, 1), (0, 0, 0), (0, 1, 1), (0, 1, 0), (0, 2, 1), (0, 2, 0), (1, 0, 1), (1, 0, 0), (1, 1, 1), (1, 1, 0), (1, 2, 1), (1, 2, 0)]) @@ -286,7 +286,7 @@ def test_iter_best_order_multi_index_3d(): assert_equal(iter_multi_index(i), [(0, 2, 0), (1, 2, 0), (0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0), (0, 2, 1), (1, 2, 1), (0, 1, 1), (1, 1, 1), (0, 0, 1), (1, 0, 1)]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, :, ::-1], ['multi_index'], [['readonly']]) assert_equal(iter_multi_index(i), [(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1), @@ -352,7 +352,7 @@ def test_iter_best_order_c_index_3d(): i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) - i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['c_index'], [['readonly']]) + i = nditer(a.reshape(2, 3, 2)[:, :, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) # 3D reversed Fortran-order @@ -364,7 +364,7 @@ def test_iter_best_order_c_index_3d(): ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, :, ::-1], ['c_index'], [['readonly']]) assert_equal(iter_indices(i), [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) @@ -429,7 +429,7 @@ def test_iter_best_order_f_index_3d(): i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) - i = nditer(a.reshape(2, 3, 2)[:,:, ::-1], ['f_index'], [['readonly']]) + i = nditer(a.reshape(2, 3, 2)[:, :, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) # 3D reversed Fortran-order @@ -441,7 +441,7 @@ def test_iter_best_order_f_index_3d(): ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) - i = nditer(a.reshape(2, 3, 2).copy(order='F')[:,:, ::-1], + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, :, ::-1], ['f_index'], [['readonly']]) assert_equal(iter_indices(i), [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) @@ -481,15 +481,15 @@ def test_iter_no_inner_dim_coalescing(): # Skipping the last element in a dimension prevents coalescing # with the next-bigger 
dimension - a = arange(24).reshape(2, 3, 4)[:,:, :-1] + a = arange(24).reshape(2, 3, 4)[:, :, :-1] i = nditer(a, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 2) assert_equal(i[0].shape, (3,)) - a = arange(24).reshape(2, 3, 4)[:, :-1,:] + a = arange(24).reshape(2, 3, 4)[:, :-1, :] i = nditer(a, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 2) assert_equal(i[0].shape, (8,)) - a = arange(24).reshape(2, 3, 4)[:-1,:,:] + a = arange(24).reshape(2, 3, 4)[:-1, :, :] i = nditer(a, ['external_loop'], [['readonly']]) assert_equal(i.ndim, 1) assert_equal(i[0].shape, (12,)) @@ -761,9 +761,9 @@ def test_iter_flags_errors(): a.flags.writeable = True # Multi-indices available only with the multi_index flag i = nditer(arange(6), [], [['readonly']]) - assert_raises(ValueError, lambda i:i.multi_index, i) + assert_raises(ValueError, lambda i: i.multi_index, i) # Index available only with an index flag - assert_raises(ValueError, lambda i:i.index, i) + assert_raises(ValueError, lambda i: i.index, i) # GotoCoords and GotoIndex incompatible with buffering or no_inner def assign_multi_index(i): @@ -911,7 +911,7 @@ def test_iter_array_cast(): # The memory layout of the temporary should match a (a is (48,4,16)) # except negative strides get flipped to positive strides. assert_equal(i.operands[0].strides, (96, 8, 32)) - a = a[::-1,:, ::-1] + a = a[::-1, :, ::-1] i = nditer(a, [], [['readonly', 'copy']], casting='safe', op_dtypes=[np.dtype('f8')]) @@ -1049,7 +1049,7 @@ def test_iter_scalar_cast_errors(): def test_iter_object_arrays_basic(): # Check that object arrays work - obj = {'a':3,'b':'d'} + obj = {'a': 3, 'b': 'd'} a = np.array([[1, 2, 3], None, obj, None], dtype='O') if HAS_REFCOUNT: rc = sys.getrefcount(obj) @@ -1677,12 +1677,12 @@ def test_iter_remove_axis(): i = nditer(a, ['multi_index']) i.remove_axis(1) - assert_equal([x for x in i], a[:, 0,:].ravel()) + assert_equal(list(i), a[:, 0, :].ravel()) - a = a[::-1,:,:] + a = a[::-1, :, :] i = nditer(a, ['multi_index']) i.remove_axis(0) - assert_equal([x for x in i], a[0,:,:].ravel()) + assert_equal(list(i), a[0, :, :].ravel()) def test_iter_remove_multi_index_inner_loop(): # Check that removing multi-index support works @@ -1695,13 +1695,13 @@ def test_iter_remove_multi_index_inner_loop(): assert_equal(i.itviews[0].shape, (2, 3, 4)) # Removing the multi-index tracking causes all dimensions to coalesce - before = [x for x in i] + before = list(i) i.remove_multi_index() - after = [x for x in i] + after = list(i) assert_equal(before, after) assert_equal(i.ndim, 1) - assert_raises(ValueError, lambda i:i.shape, i) + assert_raises(ValueError, lambda i: i.shape, i) assert_equal(i.itviews[0].shape, (24,)) # Removing the inner loop means there's just one iteration @@ -1847,9 +1847,9 @@ def test_iter_buffering_delayed_alloc(): casting='unsafe', op_dtypes='f4') assert_(i.has_delayed_bufalloc) - assert_raises(ValueError, lambda i:i.multi_index, i) - assert_raises(ValueError, lambda i:i[0], i) - assert_raises(ValueError, lambda i:i[0:2], i) + assert_raises(ValueError, lambda i: i.multi_index, i) + assert_raises(ValueError, lambda i: i[0], i) + assert_raises(ValueError, lambda i: i[0:2], i) def assign_iter(i): i[0] = 0 @@ -2240,7 +2240,7 @@ def test_iter_buffered_cast_subarray(): for x in i: assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) assert_equal(x['a'][:2, 1], a[count]['a'][:, 0]) - assert_equal(x['a'][2,:], [0, 0]) + assert_equal(x['a'][2, :], [0, 0]) count += 1 # matrix -> matrix (truncates and zero-pads) @@ -2256,7 +2256,7 @@ def 
test_iter_buffered_cast_subarray(): for x in i: assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) assert_equal(x['a'][:2, 1], a[count]['a'][:, 1]) - assert_equal(x['a'][2,:], [0, 0]) + assert_equal(x['a'][2, :], [0, 0]) count += 1 def test_iter_buffering_badwriteback(): @@ -2549,7 +2549,7 @@ def test_0d(self): vals = [] for x in i: for y in j: - vals.append([z for z in k]) + vals.append(list(k)) assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) def test_iter_nested_iters_dtype_buffered(self): @@ -2689,11 +2689,11 @@ def test_iter_buffering_reduction(): assert_equal(it[0], [1, 2, 1, 2]) # Iterator inner loop should take argument contiguity into account - x = np.ones((7, 13, 8), np.int8)[4:6,1:11:6,1:5].transpose(1, 2, 0) + x = np.ones((7, 13, 8), np.int8)[4:6, 1:11:6, 1:5].transpose(1, 2, 0) x[...] = np.arange(x.size).reshape(x.shape) y_base = np.arange(4*4, dtype=np.int8).reshape(4, 4) y_base_copy = y_base.copy() - y = y_base[::2,:,None] + y = y_base[::2, :, None] it = np.nditer([y, x], ['buffered', 'external_loop', 'reduce_ok'], @@ -3120,7 +3120,7 @@ def test_writebacks(): assert_equal(au.flags.writeable, False) it.operands[0][:] = 0 raise ValueError('exit context manager on exception') - except: + except Exception: pass assert_equal(au, 0) assert_equal(au.flags.writeable, True) @@ -3174,7 +3174,7 @@ def test_close_equivalent(): def add_close(x, y, out=None): addop = np.add it = np.nditer([x, y, out], [], - [['readonly'], ['readonly'], ['writeonly','allocate']]) + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) for (a, b, c) in it: addop(a, b, out=c) ret = it.operands[2] @@ -3184,7 +3184,7 @@ def add_close(x, y, out=None): def add_context(x, y, out=None): addop = np.add it = np.nditer([x, y, out], [], - [['readonly'], ['readonly'], ['writeonly','allocate']]) + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) with it: for (a, b, c) in it: addop(a, b, out=c) diff --git a/numpy/_core/tests/test_nep50_promotions.py b/numpy/_core/tests/test_nep50_promotions.py index ab800cb5b959..9eec02239e34 100644 --- a/numpy/_core/tests/test_nep50_promotions.py +++ b/numpy/_core/tests/test_nep50_promotions.py @@ -17,65 +17,40 @@ from numpy.testing import assert_array_equal, IS_WASM -@pytest.fixture(scope="module", autouse=True) -def _weak_promotion_enabled(): - state = np._get_promotion_state() - np._set_promotion_state("weak_and_warn") - yield - np._set_promotion_state(state) - - @pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for fp errors") def test_nep50_examples(): - with pytest.warns(UserWarning, match="result dtype changed"): - res = np.uint8(1) + 2 + res = np.uint8(1) + 2 assert res.dtype == np.uint8 - with pytest.warns(UserWarning, match="result dtype changed"): - res = np.array([1], np.uint8) + np.int64(1) + res = np.array([1], np.uint8) + np.int64(1) assert res.dtype == np.int64 - with pytest.warns(UserWarning, match="result dtype changed"): - res = np.array([1], np.uint8) + np.array(1, dtype=np.int64) + res = np.array([1], np.uint8) + np.array(1, dtype=np.int64) assert res.dtype == np.int64 - with pytest.warns(UserWarning, match="result dtype changed"): - # Note: For "weak_and_warn" promotion state the overflow warning is - # unfortunately not given (because we use the full array path). 
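# For context, a minimal standalone sketch of the scalar behaviour the
# rewritten test pins down (an illustration assuming NumPy >= 2.0, where
# NEP 50 promotion is always active):
import warnings
import numpy as np

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    res = np.uint8(100) + 200        # the Python int is "weak": result stays uint8
assert res.dtype == np.uint8         # 300 wraps around to 44 within uint8
assert res == 44
assert any(issubclass(w.category, RuntimeWarning) for w in caught)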
- with np.errstate(over="raise"): - res = np.uint8(100) + 200 + with pytest.warns(RuntimeWarning, match="overflow"): + res = np.uint8(100) + 200 assert res.dtype == np.uint8 - with pytest.warns(Warning) as recwarn: + with pytest.warns(RuntimeWarning, match="overflow"): res = np.float32(1) + 3e100 - # Check that both warnings were given in the one call: - warning = str(recwarn.pop(UserWarning).message) - assert warning.startswith("result dtype changed") - warning = str(recwarn.pop(RuntimeWarning).message) - assert warning.startswith("overflow") - assert len(recwarn) == 0 # no further warnings assert np.isinf(res) assert res.dtype == np.float32 - # Changes, but we don't warn for it (too noisy) res = np.array([0.1], np.float32) == np.float64(0.1) assert res[0] == False - # Additional test, since the above silences the warning: - with pytest.warns(UserWarning, match="result dtype changed"): - res = np.array([0.1], np.float32) + np.float64(0.1) + res = np.array([0.1], np.float32) + np.float64(0.1) assert res.dtype == np.float64 - with pytest.warns(UserWarning, match="result dtype changed"): - res = np.array([1.], np.float32) + np.int64(3) + res = np.array([1.], np.float32) + np.int64(3) assert res.dtype == np.float64 @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) def test_nep50_weak_integers(dtype): # Avoids warning (different code path for scalars) - np._set_promotion_state("weak") scalar_type = np.dtype(dtype).type maxint = int(np.iinfo(dtype).max) @@ -94,7 +69,6 @@ def test_nep50_weak_integers(dtype): @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) def test_nep50_weak_integers_with_inexact(dtype): # Avoids warning (different code path for scalars) - np._set_promotion_state("weak") scalar_type = np.dtype(dtype).type too_big_int = int(np.finfo(dtype).max) * 2 @@ -137,7 +111,6 @@ def test_nep50_weak_integers_with_inexact(dtype): @pytest.mark.parametrize("op", [operator.add, operator.pow]) def test_weak_promotion_scalar_path(op): # Some additional paths exercising the weak scalars. - np._set_promotion_state("weak") # Integer path: res = op(np.uint8(3), 5) @@ -154,8 +127,6 @@ def test_weak_promotion_scalar_path(op): def test_nep50_complex_promotion(): - np._set_promotion_state("weak") - with pytest.warns(RuntimeWarning, match=".*overflow"): res = np.complex64(3) + complex(2**300) @@ -163,8 +134,6 @@ def test_nep50_complex_promotion(): def test_nep50_integer_conversion_errors(): - # Do not worry about warnings here (auto-fixture will reset). - np._set_promotion_state("weak") # Implementation for error paths is mostly missing (as of writing) with pytest.raises(OverflowError, match=".*uint8"): np.array([1], np.uint8) + 300 @@ -178,51 +147,24 @@ def test_nep50_integer_conversion_errors(): np.uint8(1) + -1 -def test_nep50_integer_regression(): - # Test the old integer promotion rules. When the integer is too large, - # we need to keep using the old-style promotion. - np._set_promotion_state("legacy") - arr = np.array(1) - assert (arr + 2**63).dtype == np.float64 - assert (arr[()] + 2**63).dtype == np.float64 - - def test_nep50_with_axisconcatenator(): - # I promised that this will be an error in the future in the 1.25 - # release notes; test this (NEP 50 opt-in makes the deprecation an error). 
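# In isolation, the error the updated test below expects (a sketch:
# r_/concatenate do not apply weak promotion, so an out-of-range Python int
# must fail):
import numpy as np

try:
    np.r_[np.arange(5, dtype=np.int8), 255]   # 255 cannot be represented by int8
except OverflowError:
    pass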
- np._set_promotion_state("weak") - + # Concatenate/r_ does not promote, so this has to error: with pytest.raises(OverflowError): np.r_[np.arange(5, dtype=np.int8), 255] @pytest.mark.parametrize("ufunc", [np.add, np.power]) -@pytest.mark.parametrize("state", ["weak", "weak_and_warn"]) -def test_nep50_huge_integers(ufunc, state): +def test_nep50_huge_integers(ufunc): # Very large integers are complicated, because they go to uint64 or - # object dtype. This tests covers a few possible paths (some of which - # cannot give the NEP 50 warnings). - np._set_promotion_state(state) - + # object dtype. This tests covers a few possible paths. with pytest.raises(OverflowError): ufunc(np.int64(0), 2**63) # 2**63 too large for int64 - if state == "weak_and_warn": - with pytest.warns(UserWarning, - match="result dtype changed.*float64.*uint64"): - with pytest.raises(OverflowError): - ufunc(np.uint64(0), 2**64) - else: - with pytest.raises(OverflowError): - ufunc(np.uint64(0), 2**64) # 2**64 cannot be represented by uint64 + with pytest.raises(OverflowError): + ufunc(np.uint64(0), 2**64) # 2**64 cannot be represented by uint64 # However, 2**63 can be represented by the uint64 (and that is used): - if state == "weak_and_warn": - with pytest.warns(UserWarning, - match="result dtype changed.*float64.*uint64"): - res = ufunc(np.uint64(1), 2**63) - else: - res = ufunc(np.uint64(1), 2**63) + res = ufunc(np.uint64(1), 2**63) assert res.dtype == np.uint64 assert res == ufunc(1, 2**63, dtype=object) @@ -240,14 +182,10 @@ def test_nep50_huge_integers(ufunc, state): def test_nep50_in_concat_and_choose(): - np._set_promotion_state("weak_and_warn") - - with pytest.warns(UserWarning, match="result dtype changed"): - res = np.concatenate([np.float32(1), 1.], axis=None) + res = np.concatenate([np.float32(1), 1.], axis=None) assert res.dtype == "float32" - with pytest.warns(UserWarning, match="result dtype changed"): - res = np.choose(1, [np.float32(1), 1.]) + res = np.choose(1, [np.float32(1), 1.]) assert res.dtype == "float32" @@ -263,8 +201,6 @@ def test_nep50_in_concat_and_choose(): ]) @hypothesis.given(data=strategies.data()) def test_expected_promotion(expected, dtypes, optional_dtypes, data): - np._set_promotion_state("weak") - # Sample randomly while ensuring "dtypes" is always present: optional = data.draw(strategies.lists( strategies.sampled_from(dtypes + optional_dtypes))) @@ -284,8 +220,6 @@ def test_expected_promotion(expected, dtypes, optional_dtypes, data): [operator.eq, operator.ne, operator.le, operator.lt, operator.ge, operator.gt]) def test_integer_comparison(sctype, other_val, comp): - np._set_promotion_state("weak") - # Test that comparisons with integers (especially out-of-bound) ones # works correctly. val_obj = 10 @@ -303,12 +237,24 @@ def test_integer_comparison(sctype, other_val, comp): assert_array_equal(comp(other_val, val_obj), comp(other_val, val)) +@pytest.mark.parametrize("arr", [ + np.ones((100, 100), dtype=np.uint8)[::2], # not trivially iterable + np.ones(20000, dtype=">u4"), # cast and >buffersize + np.ones(100, dtype=">u4"), # fast path compatible with cast +]) +def test_integer_comparison_with_cast(arr): + # Similar to above, but mainly test a few cases that cover the slow path + # the test is limited to unsigned ints and -1 for simplicity. 
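+    # (Standalone: np.ones(100, dtype=">u4") >= -1 is all True and
+    # np.ones(100, dtype=">u4") < -1 is all False, since -1 lies below the
+    # unsigned range.)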
+ res = arr >= -1 + assert_array_equal(res, np.ones_like(arr, dtype=bool)) + res = arr < -1 + assert_array_equal(res, np.zeros_like(arr, dtype=bool)) + + @pytest.mark.parametrize("comp", [np.equal, np.not_equal, np.less_equal, np.less, np.greater_equal, np.greater]) def test_integer_integer_comparison(comp): - np._set_promotion_state("weak") - # Test that the NumPy comparison ufuncs work with large Python integers assert comp(2**200, -2**200) == comp(2**200, -2**200, dtype=object) @@ -342,26 +288,3 @@ def test_oob_creation(sctype, create): assert create(sctype, iinfo.min) == iinfo.min assert create(sctype, iinfo.max) == iinfo.max - - -@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads") -def test_thread_local_promotion_state(): - b = threading.Barrier(2) - - def legacy_no_warn(): - np._set_promotion_state("legacy") - b.wait() - assert np._get_promotion_state() == "legacy" - - def weak_warn(): - np._set_promotion_state("weak") - b.wait() - assert np._get_promotion_state() == "weak" - - task1 = threading.Thread(target=legacy_no_warn) - task2 = threading.Thread(target=weak_warn) - - task1.start() - task2.start() - task1.join() - task2.join() diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index ee0d1bbfee1e..8e63536cbd55 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -184,6 +184,7 @@ def test_reshape_shape_arg(self): assert_equal(np.reshape(arr, shape), expected) assert_equal(np.reshape(arr, shape, order="C"), expected) + assert_equal(np.reshape(arr, shape, "C"), expected) assert_equal(np.reshape(arr, shape=shape), expected) assert_equal(np.reshape(arr, shape=shape, order="C"), expected) with pytest.warns(DeprecationWarning): @@ -333,7 +334,7 @@ def test_take(self): tgt = np.array([1, 3, 3, 4], dtype=array_type) out = np.take(x, ind) assert_equal(out, tgt) - assert_equal(out.dtype, tgt.dtype) + assert_equal(out.dtype, tgt.dtype) def test_trace(self): c = [[1, 2], [3, 4], [5, 6]] @@ -343,6 +344,7 @@ def test_transpose(self): arr = [[1, 2], [3, 4], [5, 6]] tgt = [[1, 3, 5], [2, 4, 6]] assert_equal(np.transpose(arr, (1, 0)), tgt) + assert_equal(np.transpose(arr, (-1, -2)), tgt) assert_equal(np.matrix_transpose(arr), tgt) def test_var(self): @@ -1487,21 +1489,22 @@ def test_can_cast_structured_to_simple(self): assert_(not np.can_cast([('f0', ('i4,i4'), (2,))], 'i4', casting='unsafe')) - @pytest.mark.xfail(np._get_promotion_state() != "legacy", - reason="NEP 50: no python int/float/complex support (yet)") def test_can_cast_values(self): - # gh-5917 - for dt in sctypes['int'] + sctypes['uint']: - ii = np.iinfo(dt) - assert_(np.can_cast(ii.min, dt)) - assert_(np.can_cast(ii.max, dt)) - assert_(not np.can_cast(ii.min - 1, dt)) - assert_(not np.can_cast(ii.max + 1, dt)) - - for dt in sctypes['float']: - fi = np.finfo(dt) - assert_(np.can_cast(fi.min, dt)) - assert_(np.can_cast(fi.max, dt)) + # With NumPy 2 and NEP 50, can_cast errors on Python scalars. We could + # define this as (usually safe) at some point, and already do so + # in `copyto` and ufuncs (but there an error is raised if the integer + # is out of bounds and a warning for out-of-bound floats). + # Raises even for unsafe, previously checked within range (for floats + # that was approximately whether it would overflow to inf). 
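+        # (E.g. np.can_cast(4, "int8", casting="unsafe") now raises TypeError;
+        # the value-based checks live in `copyto` and the ufunc machinery
+        # instead.)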
+ with pytest.raises(TypeError): + np.can_cast(4, "int8", casting="unsafe") + + with pytest.raises(TypeError): + np.can_cast(4.0, "float64", casting="unsafe") + + with pytest.raises(TypeError): + np.can_cast(4j, "complex128", casting="unsafe") + @pytest.mark.parametrize("dtype", list("?bhilqBHILQefdgFDG") + [rational]) @@ -1707,6 +1710,23 @@ def test_sparse(self): assert_equal(np.nonzero(c)[0], np.concatenate((np.arange(10 + i, 20 + i), [20 + i*2]))) + @pytest.mark.parametrize('dtype', [np.float32, np.float64]) + def test_nonzero_float_dtypes(self, dtype): + rng = np.random.default_rng(seed=10) + x = ((2**33)*rng.normal(size=100)).astype(dtype) + x[rng.choice(50, size=100)] = 0 + idxs = np.nonzero(x)[0] + assert_equal(np.array_equal(np.where(x != 0)[0], idxs), True) + + @pytest.mark.parametrize('dtype', [bool, np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64]) + def test_nonzero_integer_dtypes(self, dtype): + rng = np.random.default_rng(seed=10) + x = rng.integers(0, 255, size=100).astype(dtype) + x[rng.choice(50, size=100)] = 0 + idxs = np.nonzero(x)[0] + assert_equal(np.array_equal(np.where(x != 0)[0], idxs), True) + def test_return_type(self): class C(np.ndarray): pass @@ -2176,9 +2196,9 @@ class TestArrayComparisons: ) def test_array_equal_equal_nan(self, bx, by, equal_nan, expected): """ - This test array_equal for a few combinaison: + This test array_equal for a few combinations: - - are the two inputs the same object or not (same object many not + - are the two inputs the same object or not (same object may not be equal if contains NaNs) - Whether we should consider or not, NaNs, being equal. @@ -2190,14 +2210,21 @@ def test_array_equal_equal_nan(self, bx, by, equal_nan, expected): assert_(res is expected) assert_(type(res) is bool) + def test_array_equal_different_scalar_types(self): + # https://github.com/numpy/numpy/issues/27271 + a = np.array("foo") + b = np.array(1) + assert not np.array_equal(a, b) + assert not np.array_equiv(a, b) + def test_none_compares_elementwise(self): a = np.array([None, 1, None], dtype=object) - assert_equal(a == None, [True, False, True]) - assert_equal(a != None, [False, True, False]) + assert_equal(a == None, [True, False, True]) # noqa: E711 + assert_equal(a != None, [False, True, False]) # noqa: E711 a = np.ones(3) - assert_equal(a == None, [False, False, False]) - assert_equal(a != None, [True, True, True]) + assert_equal(a == None, [False, False, False]) # noqa: E711 + assert_equal(a != None, [True, True, True]) # noqa: E711 def test_array_equiv(self): res = np.array_equiv(np.array([1, 2]), np.array([1, 2])) @@ -3693,6 +3720,18 @@ def test_roll_empty(self): x = np.array([]) assert_equal(np.roll(x, 1), np.array([])) + def test_roll_unsigned_shift(self): + x = np.arange(4) + shift = np.uint16(2) + assert_equal(np.roll(x, shift), np.roll(x, 2)) + + shift = np.uint64(2**63+2) + assert_equal(np.roll(x, shift), np.roll(x, 2)) + + def test_roll_big_int(self): + x = np.arange(4) + assert_equal(np.roll(x, 2**100), x) + class TestRollaxis: diff --git a/numpy/_core/tests/test_overrides.py b/numpy/_core/tests/test_overrides.py index 1ac2277b5de7..cd20ceb5ac7f 100644 --- a/numpy/_core/tests/test_overrides.py +++ b/numpy/_core/tests/test_overrides.py @@ -11,11 +11,11 @@ import numpy as np from numpy.testing import ( assert_, assert_equal, assert_raises, assert_raises_regex) +from numpy.testing.overrides import get_overridable_numpy_array_functions from numpy._core.overrides import ( _get_implementing_args, 
array_function_dispatch, verify_matching_signatures) - def _return_not_implemented(self, *args, **kwargs): return NotImplemented @@ -194,14 +194,22 @@ class OverrideSub(np.ndarray): assert_equal(result, expected.view(OverrideSub)) def test_no_wrapper(self): - # This shouldn't happen unless a user intentionally calls - # __array_function__ with invalid arguments, but check that we raise - # an appropriate error all the same. + # Regular numpy functions have wrappers, but do not presume + # all functions do (array creation ones do not): check that + # we just call the function in that case. array = np.array(1) - func = lambda x: x - with assert_raises_regex(AttributeError, '_implementation'): - array.__array_function__(func=func, types=(np.ndarray,), - args=(array,), kwargs={}) + func = lambda x: x * 2 + result = array.__array_function__(func=func, types=(np.ndarray,), + args=(array,), kwargs={}) + assert_equal(result, array * 2) + + def test_wrong_arguments(self): + # Check our implementation guards against wrong arguments. + a = np.array([1, 2]) + with pytest.raises(TypeError, match="args must be a tuple"): + a.__array_function__(np.reshape, (np.ndarray,), a, (2, 1)) + with pytest.raises(TypeError, match="kwargs must be a dict"): + a.__array_function__(np.reshape, (np.ndarray,), (a,), (2, 1)) class TestArrayFunctionDispatch: @@ -560,6 +576,13 @@ def __init__(self, function=None): self.MyNoArrayFunctionArray = MyNoArrayFunctionArray + class MySubclass(np.ndarray): + def __array_function__(self, func, types, args, kwargs): + result = super().__array_function__(func, types, args, kwargs) + return result.view(self.__class__) + + self.MySubclass = MySubclass + def add_method(self, name, arr_class, enable_value_error=False): def _definition(*args, **kwargs): # Check that `like=` isn't propagated downstream @@ -604,6 +627,28 @@ def test_array_like_not_implemented(self): delimiter=',')), ] + + def test_nep35_functions_as_array_functions(self,): + all_array_functions = get_overridable_numpy_array_functions() + like_array_functions_subset = { + getattr(np, func_name) for func_name, *_ in self.__class__._array_tests + } + assert like_array_functions_subset.issubset(all_array_functions) + + nep35_python_functions = { + np.eye, np.fromfunction, np.full, np.genfromtxt, + np.identity, np.loadtxt, np.ones, np.require, np.tri, + } + assert nep35_python_functions.issubset(all_array_functions) + + nep35_C_functions = { + np.arange, np.array, np.asanyarray, np.asarray, + np.ascontiguousarray, np.asfortranarray, np.empty, + np.frombuffer, np.fromfile, np.fromiter, np.fromstring, + np.zeros, + } + assert nep35_C_functions.issubset(all_array_functions) + @pytest.mark.parametrize('function, args, kwargs', _array_tests) @pytest.mark.parametrize('numpy_ref', [True, False]) def test_array_like(self, function, args, kwargs, numpy_ref): @@ -653,6 +698,19 @@ def test_no_array_function_like(self, function, args, kwargs, ref): 'The `like` argument must be an array-like that implements'): np_func(*like_args, **kwargs, like=ref) + @pytest.mark.parametrize('function, args, kwargs', _array_tests) + def test_subclass(self, function, args, kwargs): + ref =
np.array(1).view(self.MySubclass) + np_func = getattr(np, function) + like_args = tuple(a() if callable(a) else a for a in args) + array_like = np_func(*like_args, **kwargs, like=ref) + assert type(array_like) is self.MySubclass + if np_func is np.empty: + return + np_args = tuple(a() if callable(a) else a for a in args) + np_arr = np_func(*np_args, **kwargs) + assert_equal(array_like.view(np.ndarray), np_arr) + @pytest.mark.parametrize('numpy_ref', [True, False]) def test_array_like_fromfile(self, numpy_ref): self.add_method('array', self.MyArray) @@ -709,7 +767,7 @@ def test_like_as_none(self, function, args, kwargs): def test_function_like(): # We provide a `__get__` implementation, make sure it works - assert type(np.mean) is np._core._multiarray_umath._ArrayFunctionDispatcher + assert type(np.mean) is np._core._multiarray_umath._ArrayFunctionDispatcher class MyClass: def __array__(self, dtype=None, copy=None): diff --git a/numpy/_core/tests/test_records.py b/numpy/_core/tests/test_records.py index 151fa4e68727..97946cdb0fa3 100644 --- a/numpy/_core/tests/test_records.py +++ b/numpy/_core/tests/test_records.py @@ -146,7 +146,7 @@ def test_0d_recarray_repr(self): dtype=[('f0', '1 ndim). + arr = np.tile(string_array, tile) + + res = np.add.accumulate(arr, axis=0) + res_obj = np.add.accumulate(arr.astype(object), axis=0) + assert_array_equal(res, res_obj.astype(arr.dtype), strict=True) + + if arr.ndim > 1: + res = np.add.accumulate(arr, axis=-1) + res_obj = np.add.accumulate(arr.astype(object), axis=-1) + + assert_array_equal(res, res_obj.astype(arr.dtype), strict=True) + + class TestImplementation: """Check that strings are stored in the arena when possible. diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index a94b52939b1d..9fe4c2693599 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -5,7 +5,7 @@ import numpy as np from numpy.testing import assert_array_equal, assert_raises, IS_PYPY - +from numpy.testing._private.utils import requires_memory COMPARISONS = [ (operator.eq, np.equal, "=="), @@ -109,6 +109,88 @@ def test_float_to_string_cast(str_dt, float_dt): assert_array_equal(res, np.array(expected, dtype=str_dt)) +@pytest.mark.parametrize("str_dt", "US") +@pytest.mark.parametrize("size", [-1, np.iinfo(np.intc).max]) +def test_string_size_dtype_errors(str_dt, size): + if size > 0: + size = size // np.dtype(f"{str_dt}1").itemsize + 1 + + with pytest.raises(ValueError): + np.dtype((str_dt, size)) + with pytest.raises(TypeError): + np.dtype(f"{str_dt}{size}") + + +@pytest.mark.parametrize("str_dt", "US") +def test_string_size_dtype_large_repr(str_dt): + size = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize + size_str = str(size) + + dtype = np.dtype((str_dt, size)) + assert size_str in dtype.str + assert size_str in str(dtype) + assert size_str in repr(dtype) + + +@pytest.mark.slow +@requires_memory(2 * np.iinfo(np.intc).max) +@pytest.mark.parametrize("str_dt", "US") +def test_large_string_coercion_error(str_dt): + very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize + try: + large_string = "A" * (very_large + 1) + except Exception: + # We may not be able to create this Python string on 32bit. + pytest.skip("python failed to create huge string") + + class MyStr: + def __str__(self): + return large_string + + try: + # TypeError from NumPy, or OverflowError from 32bit Python. 
+ with pytest.raises((TypeError, OverflowError)): + np.array([large_string], dtype=str_dt) + + # Same as above, but input has to be converted to a string. + with pytest.raises((TypeError, OverflowError)): + np.array([MyStr()], dtype=str_dt) + except MemoryError: + # Catch memory errors, because `requires_memory` would do so. + raise AssertionError("Ops should raise before any large allocation.") + +@pytest.mark.slow +@requires_memory(2 * np.iinfo(np.intc).max) +@pytest.mark.parametrize("str_dt", "US") +def test_large_string_addition_error(str_dt): + very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize + + a = np.array(["A" * very_large], dtype=str_dt) + b = np.array("B", dtype=str_dt) + try: + with pytest.raises(TypeError): + np.add(a, b) + with pytest.raises(TypeError): + np.add(a, a) + except MemoryError: + # Catch memory errors, because `requires_memory` would do so. + raise AssertionError("Ops should raise before any large allocation.") + + +def test_large_string_cast(): + very_large = np.iinfo(np.intc).max // 4 + # Could be nice to test very large path, but it makes too many huge + # allocations right now (need non-legacy cast loops for this). + # a = np.array([], dtype=np.dtype(("S", very_large))) + # assert a.astype("U").dtype.itemsize == very_large * 4 + + a = np.array([], dtype=np.dtype(("S", very_large + 1))) + # It is not perfect but OK if this raises a MemoryError during setup + # (this happens due clunky code and/or buffer setup.) + with pytest.raises((TypeError, MemoryError)): + a.astype("U") + + @pytest.mark.parametrize("dt", ["S", "U", "T"]) class TestMethods: diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 26b6a1aa5c27..7ca2f21df363 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -27,7 +27,7 @@ UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types] # Remove functions that do not support `floats` -UNARY_OBJECT_UFUNCS.remove(getattr(np, 'bitwise_count')) +UNARY_OBJECT_UFUNCS.remove(np.bitwise_count) class TestUfuncKwargs: @@ -486,8 +486,8 @@ def test_signature_dtype_type(self): np.add(3, 4, signature=(float_dtype, float_dtype, None)) @pytest.mark.parametrize("get_kwarg", [ - lambda dt: dict(dtype=x), - lambda dt: dict(signature=(x, None, None))]) + lambda dt: dict(dtype=dt), + lambda dt: dict(signature=(dt, None, None))]) def test_signature_dtype_instances_allowed(self, get_kwarg): # We allow certain dtype instances when there is a clear singleton # and the given one is equivalent; mainly for backcompat. @@ -537,9 +537,6 @@ def test_partial_signature_mismatch_with_cache(self): with pytest.raises(TypeError): np.add(np.float16(1), np.uint64(2), sig=("e", "d", None)) - @pytest.mark.xfail(np._get_promotion_state() != "legacy", - reason="NEP 50 impl breaks casting checks when `dtype=` is used " - "together with python scalars.") def test_use_output_signature_for_all_arguments(self): # Test that providing only `dtype=` or `signature=(None, None, dtype)` # is sufficient if falling back to a homogeneous signature works. 
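# For reference, the fallback that test exercises, shown in isolation (a
# sketch: under NEP 50, pinning only the output dtype selects the homogeneous
# float64 loop, Python-scalar inputs included):
import numpy as np

res = np.add(3, 4, dtype=np.float64)
assert res == 7.0 and res.dtype == np.float64
res = np.add(3, 4, signature=(None, None, np.float64))
assert res.dtype == np.float64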
@@ -827,20 +824,77 @@ def test_vecdot(self): actual3 = np.vecdot(arr1.astype("object"), arr2) assert_array_equal(actual3, expected.astype("object")) - def test_vecdot_complex(self): - arr1 = np.array([1, 2j, 3]) - arr2 = np.array([1, 2, 3]) + def test_matvec(self): + arr1 = np.arange(6).reshape((2, 3)) + arr2 = np.arange(3).reshape((1, 3)) + + actual = np.matvec(arr1, arr2) + expected = np.array([[5, 14]]) - actual = np.vecdot(arr1, arr2) - expected = np.array([10-4j]) assert_array_equal(actual, expected) - actual2 = np.vecdot(arr2, arr1) - assert_array_equal(actual2, expected.conj()) + actual2 = np.matvec(arr1.T, arr2.T, axes=[(-1, -2), -2, -1]) + assert_array_equal(actual2, expected) - actual3 = np.vecdot(arr1.astype("object"), arr2.astype("object")) + actual3 = np.matvec(arr1.astype("object"), arr2) assert_array_equal(actual3, expected.astype("object")) + @pytest.mark.parametrize("vec", [ + np.array([[1., 2., 3.], [4., 5., 6.]]), + np.array([[1., 2j, 3.], [4., 5., 6j]]), + np.array([[1., 2., 3.], [4., 5., 6.]], dtype=object), + np.array([[1., 2j, 3.], [4., 5., 6j]], dtype=object)]) + @pytest.mark.parametrize("matrix", [ + None, + np.array([[1.+1j, 0.5, -0.5j], + [0.25, 2j, 0.], + [4., 0., -1j]])]) + def test_vecmatvec_identity(self, matrix, vec): + """Check that (x†A)x equals x†(Ax).""" + mat = matrix if matrix is not None else np.eye(3) + matvec = np.matvec(mat, vec) # Ax + vecmat = np.vecmat(vec, mat) # x†A + if matrix is None: + assert_array_equal(matvec, vec) + assert_array_equal(vecmat.conj(), vec) + assert_array_equal(matvec, (mat @ vec[..., np.newaxis]).squeeze(-1)) + assert_array_equal(vecmat, (vec[..., np.newaxis].mT.conj() + @ mat).squeeze(-2)) + expected = np.einsum('...i,ij,...j', vec.conj(), mat, vec) + vec_matvec = (vec.conj() * matvec).sum(-1) + vecmat_vec = (vecmat * vec).sum(-1) + assert_array_equal(vec_matvec, expected) + assert_array_equal(vecmat_vec, expected) + + @pytest.mark.parametrize("ufunc, shape1, shape2, conj", [ + (np.vecdot, (3,), (3,), True), + (np.vecmat, (3,), (3, 1), True), + (np.matvec, (1, 3), (3,), False), + (np.matmul, (1, 3), (3, 1), False), + ]) + def test_vecdot_matvec_vecmat_complex(self, ufunc, shape1, shape2, conj): + arr1 = np.array([1, 2j, 3]) + arr2 = np.array([1, 2, 3]) + + actual1 = ufunc(arr1.reshape(shape1), arr2.reshape(shape2)) + expected1 = np.array(((arr1.conj() if conj else arr1) * arr2).sum(), + ndmin=min(len(shape1), len(shape2))) + assert_array_equal(actual1, expected1) + # This would fail for conj=True, since matmul omits the conjugate. 
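+        # (Concretely: np.vecdot([1j], [1j]) conjugates its first argument and
+        # gives conj(1j)*1j == 1, whereas np.matmul on the same vectors gives
+        # 1j*1j == -1.)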
+ if not conj: + assert_array_equal(arr1.reshape(shape1) @ arr2.reshape(shape2), + expected1) + + actual2 = ufunc(arr2.reshape(shape1), arr1.reshape(shape2)) + expected2 = np.array(((arr2.conj() if conj else arr2) * arr1).sum(), + ndmin=min(len(shape1), len(shape2))) + assert_array_equal(actual2, expected2) + + actual3 = ufunc(arr1.reshape(shape1).astype("object"), + arr2.reshape(shape2).astype("object")) + expected3 = expected1.astype(object) + assert_array_equal(actual3, expected3) + def test_vecdot_subclass(self): class MySubclass(np.ndarray): pass @@ -1647,51 +1701,46 @@ def test_where_with_broadcasting(self): assert_array_equal((a[where] < b_where), out[where].astype(bool)) assert not out[~where].any() # outside mask, out remains all 0 - def check_identityless_reduction(self, a): - # np.minimum.reduce is an identityless reduction + @staticmethod + def identityless_reduce_arrs(): + yield np.empty((2, 3, 4), order='C') + yield np.empty((2, 3, 4), order='F') + # Mixed order (reduce order differs outer) + yield np.empty((2, 4, 3), order='C').swapaxes(1, 2) + # Reversed order + yield np.empty((2, 3, 4), order='C')[::-1, ::-1, ::-1] + # Not contiguous + yield np.empty((3, 5, 4), order='C').swapaxes(1, 2)[1:, 1:, 1:] + # Not contiguous and not aligned + a = np.empty((3*4*5*8 + 1,), dtype='i1') + a = a[1:].view(dtype='f8') + a.shape = (3, 4, 5) + a = a[1:, 1:, 1:] + yield a - # Verify that it sees the zero at various positions + @pytest.mark.parametrize("a", identityless_reduce_arrs()) + @pytest.mark.parametrize("pos", [(1, 0, 0), (0, 1, 0), (0, 0, 1)]) + def test_identityless_reduction(self, a, pos): + # np.minimum.reduce is an identityless reduction a[...] = 1 - a[1, 0, 0] = 0 - assert_equal(np.minimum.reduce(a, axis=None), 0) - assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(1, 2)), [1, 0]) - assert_equal(np.minimum.reduce(a, axis=0), - [[0, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=1), - [[1, 1, 1, 1], [0, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=2), - [[1, 1, 1], [0, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=()), a) + a[pos] = 0 - a[...] = 1 - a[0, 1, 0] = 0 - assert_equal(np.minimum.reduce(a, axis=None), 0) - assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(0, 2)), [1, 0, 1]) - assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1]) - assert_equal(np.minimum.reduce(a, axis=0), - [[1, 1, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=1), - [[0, 1, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=2), - [[1, 0, 1], [1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=()), a) + for axis in [None, (0, 1), (0, 2), (1, 2), 0, 1, 2, ()]: + if axis is None: + axes = np.array([], dtype=np.intp) + else: + axes = np.delete(np.arange(a.ndim), axis) - a[...] 
= 1 - a[0, 0, 1] = 0 - assert_equal(np.minimum.reduce(a, axis=None), 0) - assert_equal(np.minimum.reduce(a, axis=(0, 1)), [1, 0, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1]) - assert_equal(np.minimum.reduce(a, axis=0), - [[1, 0, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=1), - [[1, 0, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=2), - [[0, 1, 1], [1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=()), a) + expected_pos = tuple(np.array(pos)[axes]) + expected = np.ones(np.array(a.shape)[axes]) + expected[expected_pos] = 0 + + res = np.minimum.reduce(a, axis=axis) + assert_equal(res, expected, strict=True) + + res = np.full_like(res, np.nan) + np.minimum.reduce(a, axis=axis, out=res) + assert_equal(res, expected, strict=True) @requires_memory(6 * 1024**3) @pytest.mark.skipif(sys.maxsize < 2**32, @@ -1706,30 +1755,6 @@ def test_identityless_reduction_huge_array(self): assert res[0] == 3 assert res[-1] == 4 - def test_identityless_reduction_corder(self): - a = np.empty((2, 3, 4), order='C') - self.check_identityless_reduction(a) - - def test_identityless_reduction_forder(self): - a = np.empty((2, 3, 4), order='F') - self.check_identityless_reduction(a) - - def test_identityless_reduction_otherorder(self): - a = np.empty((2, 4, 3), order='C').swapaxes(1, 2) - self.check_identityless_reduction(a) - - def test_identityless_reduction_noncontig(self): - a = np.empty((3, 5, 4), order='C').swapaxes(1, 2) - a = a[1:, 1:, 1:] - self.check_identityless_reduction(a) - - def test_identityless_reduction_noncontig_unaligned(self): - a = np.empty((3*4*5*8 + 1,), dtype='i1') - a = a[1:].view(dtype='f8') - a.shape = (3, 4, 5) - a = a[1:, 1:, 1:] - self.check_identityless_reduction(a) - def test_reduce_identity_depends_on_loop(self): """ The type of the result should always depend on the selected loop, not @@ -2547,8 +2572,8 @@ def test_reducelike_out_promotes(self): assert single_res != res def test_reducelike_output_needs_identical_cast(self): - # Checks the case where the we have a simple byte-swap works, maily - # tests that this is not rejected directly. + # Checks the case where a simple byte-swap works, mainly tests that + # this is not rejected directly. # (interesting because we require descriptor identity in reducelikes). arr = np.ones(20, dtype="f8") out = np.empty((), dtype=arr.dtype.newbyteorder()) @@ -2749,7 +2774,6 @@ def test_ufunc_types(ufunc): @pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np) if isinstance(getattr(np, x), np.ufunc)]) -@np._no_nep50_warning() def test_ufunc_noncontiguous(ufunc): ''' Check that contiguous and non-contiguous calls to ufuncs @@ -2761,9 +2785,9 @@ def test_ufunc_noncontiguous(ufunc): # bool, object, datetime are too irregular for this simple test continue inp, out = typ.split('->') - args_c = [np.empty(6, t) for t in inp] - # non contiguous (3 step) - args_n = [np.empty(18, t)[::3] for t in inp] + args_c = [np.empty((6, 6), t) for t in inp] + # non contiguous (2, 3 step on the two dimensions) + args_n = [np.empty((12, 18), t)[::2, ::3] for t in inp] # alignment != itemsize is possible. So create an array with such # an odd step manually. 
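        # (For instance, a dtype of [("_", "S1"), ("t", "f8")] would place the
        # float64 field at offset 1 with stride 9, i.e. truly unaligned; using
        # the type's alignment as the offset keeps the field aligned while
        # making the step differ from the itemsize.)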
args_o = [] @@ -2771,10 +2795,9 @@ def test_ufunc_noncontiguous(ufunc): orig_dt = np.dtype(t) off_dt = f"S{orig_dt.alignment}" # offset by alignment dtype = np.dtype([("_", off_dt), ("t", orig_dt)], align=False) - args_o.append(np.empty(6, dtype=dtype)["t"]) - + args_o.append(np.empty((6, 6), dtype=dtype)["t"]) for a in args_c + args_n + args_o: - a.flat = range(1,7) + a.flat = range(1, 37) with warnings.catch_warnings(record=True): warnings.filterwarnings("always") @@ -2792,7 +2815,7 @@ def test_ufunc_noncontiguous(ufunc): # since different algorithms (libm vs. intrinsics) can be used # for different input strides res_eps = np.finfo(dt).eps - tol = 2*res_eps + tol = 3*res_eps assert_allclose(res_c, res_n, atol=tol, rtol=tol) assert_allclose(res_c, res_o, atol=tol, rtol=tol) else: diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 9a300f19764c..4d56c785d5a7 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -4,7 +4,6 @@ import itertools import pytest import sys -import os import operator from fractions import Fraction from functools import reduce @@ -694,11 +693,11 @@ def test_floor_division_corner_cases(self, dtype): with suppress_warnings() as sup: sup.filter(RuntimeWarning, "invalid value encountered in floor_divide") div = np.floor_divide(fnan, fone) - assert(np.isnan(div)), "dt: %s, div: %s" % (dt, div) + assert(np.isnan(div)), "div: %s" % div div = np.floor_divide(fone, fnan) - assert(np.isnan(div)), "dt: %s, div: %s" % (dt, div) + assert(np.isnan(div)), "div: %s" % div div = np.floor_divide(fnan, fzer) - assert(np.isnan(div)), "dt: %s, div: %s" % (dt, div) + assert(np.isnan(div)), "div: %s" % div # verify 1.0//0.0 computations return inf with np.errstate(divide='ignore'): z = np.floor_divide(y, x) @@ -4017,6 +4016,31 @@ def test_array_ufunc_direct_call(self): res = a.__array_ufunc__(np.add, "__call__", a, a) assert_array_equal(res, a + a) + def test_ufunc_docstring(self): + original_doc = np.add.__doc__ + new_doc = "new docs" + expected_dict = ( + {} if IS_PYPY else {"__module__": "numpy", "__qualname__": "add"} + ) + + np.add.__doc__ = new_doc + assert np.add.__doc__ == new_doc + assert np.add.__dict__["__doc__"] == new_doc + + del np.add.__doc__ + assert np.add.__doc__ == original_doc + assert np.add.__dict__ == expected_dict + + np.add.__dict__["other"] = 1 + np.add.__dict__["__doc__"] = new_doc + assert np.add.__doc__ == new_doc + + del np.add.__dict__["__doc__"] + assert np.add.__doc__ == original_doc + del np.add.__dict__["other"] + assert np.add.__dict__ == expected_dict + + class TestChoose: def test_mixed(self): c = np.array([True, True]) @@ -4140,7 +4164,7 @@ def test_inf_and_nan(self): assert_raises(ValueError, np.gcd, 1, inf) assert_raises(ValueError, np.gcd, np.nan, inf) assert_raises(TypeError, np.gcd, 4, float(np.inf)) - + class TestRoundingFunctions: @@ -4761,7 +4785,8 @@ def test_signaling_nan_exceptions(): ]) def test_outer_subclass_preserve(arr): # for gh-8661 - class foo(np.ndarray): pass + class foo(np.ndarray): + pass actual = np.multiply.outer(arr.view(foo), arr.view(foo)) assert actual.__class__.__name__ == 'foo' @@ -4862,9 +4887,11 @@ def func(): class TestAdd_newdoc_ufunc: + @pytest.mark.filterwarnings("ignore:_add_newdoc_ufunc:DeprecationWarning") def test_ufunc_arg(self): assert_raises(TypeError, ncu._add_newdoc_ufunc, 2, "blah") assert_raises(ValueError, ncu._add_newdoc_ufunc, np.add, "blah") + @pytest.mark.filterwarnings("ignore:_add_newdoc_ufunc:DeprecationWarning") def 
test_string_arg(self): assert_raises(TypeError, ncu._add_newdoc_ufunc, np.add, 3) diff --git a/numpy/_core/tests/test_umath_accuracy.py b/numpy/_core/tests/test_umath_accuracy.py index 493c7d6f2d03..ccc55a0a2e16 100644 --- a/numpy/_core/tests/test_umath_accuracy.py +++ b/numpy/_core/tests/test_umath_accuracy.py @@ -13,8 +13,8 @@ UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types] # Remove functions that do not support `floats` -UNARY_OBJECT_UFUNCS.remove(getattr(np, 'invert')) -UNARY_OBJECT_UFUNCS.remove(getattr(np, 'bitwise_count')) +UNARY_OBJECT_UFUNCS.remove(np.invert) +UNARY_OBJECT_UFUNCS.remove(np.bitwise_count) IS_AVX = __cpu_features__.get('AVX512F', False) or \ (__cpu_features__.get('FMA3', False) and __cpu_features__.get('AVX2', False)) @@ -53,7 +53,9 @@ def test_validate_transcendentals(self): for filename in files: filepath = path.join(data_dir, filename) with open(filepath) as fid: - file_without_comments = (r for r in fid if not r[0] in ('$', '#')) + file_without_comments = ( + r for r in fid if r[0] not in ('$', '#') + ) data = np.genfromtxt(file_without_comments, dtype=('|S39','|S39','|S39',int), names=('type','input','output','ulperr'), diff --git a/numpy/_core/tests/test_unicode.py b/numpy/_core/tests/test_unicode.py index fbacb0a95ac4..17511555ae7b 100644 --- a/numpy/_core/tests/test_unicode.py +++ b/numpy/_core/tests/test_unicode.py @@ -1,4 +1,3 @@ -import pytest import numpy as np from numpy.testing import assert_, assert_equal, assert_array_equal diff --git a/numpy/_core/umath.py b/numpy/_core/umath.py index 8e51cd1694af..10278e52cbec 100644 --- a/numpy/_core/umath.py +++ b/numpy/_core/umath.py @@ -33,8 +33,8 @@ 'heaviside', 'hypot', 'invert', 'isfinite', 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', 'left_shift', 'less', 'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', - 'logical_or', 'logical_xor', 'maximum', 'minimum', 'mod', 'modf', + 'logical_or', 'logical_xor', 'matvec', 'maximum', 'minimum', 'mod', 'modf', 'multiply', 'negative', 'nextafter', 'not_equal', 'pi', 'positive', 'power', 'rad2deg', 'radians', 'reciprocal', 'remainder', 'right_shift', 'rint', 'sign', 'signbit', 'sin', 'sinh', 'spacing', 'sqrt', 'square', - 'subtract', 'tan', 'tanh', 'true_divide', 'trunc'] + 'subtract', 'tan', 'tanh', 'true_divide', 'trunc', 'vecdot', 'vecmat'] diff --git a/numpy/_distributor_init.pyi b/numpy/_distributor_init.pyi new file mode 100644 index 000000000000..94456aba2bcf --- /dev/null +++ b/numpy/_distributor_init.pyi @@ -0,0 +1 @@ +# intentionally left blank diff --git a/numpy/_expired_attrs_2_0.py b/numpy/_expired_attrs_2_0.py index 06de514e35e4..f5eb59e5ea17 100644 --- a/numpy/_expired_attrs_2_0.py +++ b/numpy/_expired_attrs_2_0.py @@ -11,7 +11,7 @@ "lookfor": "Search NumPy's documentation directly.", "who": "Use an IDE variable explorer or `locals()` instead.", "fastCopyAndTranspose": "Use `arr.T.copy()` instead.", - "set_numeric_ops": + "set_numeric_ops": "For the general case, use `PyUFunc_ReplaceLoopBySignature`. 
" "For ndarray subclasses, define the ``__array_ufunc__`` method " "and override the relevant ufunc.", @@ -19,11 +19,11 @@ "PINF": "Use `np.inf` instead.", "NZERO": "Use `-0.0` instead.", "PZERO": "Use `0.0` instead.", - "add_newdoc": + "add_newdoc": "It's still available as `np.lib.add_newdoc`.", - "add_docstring": + "add_docstring": "It's still available as `np.lib.add_docstring`.", - "add_newdoc_ufunc": + "add_newdoc_ufunc": "It's an internal function and doesn't have a replacement.", "compat": "There's no replacement, as Python 2 is no longer supported.", "safe_eval": "Use `ast.literal_eval` instead.", @@ -49,7 +49,7 @@ "sctype2char": "Use `np.dtype(obj).char` instead.", "sctypes": "Access dtypes explicitly instead.", "issubsctype": "Use `np.issubdtype` instead.", - "set_string_function": + "set_string_function": "Use `np.set_printoptions` instead with a formatter for " "custom printing of NumPy objects.", "asfarray": "Use `np.asarray` with a proper dtype instead.", @@ -63,16 +63,16 @@ "deprecate_with_doc": "Emit `DeprecationWarning` with `warnings.warn` " "directly, or use `typing.deprecated`.", "disp": "Use your own printing function instead.", - "find_common_type": + "find_common_type": "Use `numpy.promote_types` or `numpy.result_type` instead. " "To achieve semantics for the `scalar_types` argument, use " "`numpy.result_type` and pass the Python values `0`, `0.0`, or `0j`.", "round_": "Use `np.round` instead.", "get_array_wrap": "", - "DataSource": "It's still available as `np.lib.npyio.DataSource`.", - "nbytes": "Use `np.dtype().itemsize` instead.", + "DataSource": "It's still available as `np.lib.npyio.DataSource`.", + "nbytes": "Use `np.dtype().itemsize` instead.", "byte_bounds": "Now it's available under `np.lib.array_utils.byte_bounds`", - "compare_chararrays": + "compare_chararrays": "It's still available as `np.char.compare_chararrays`.", "format_parser": "It's still available as `np.rec.format_parser`.", "alltrue": "Use `np.all` instead.", diff --git a/numpy/_expired_attrs_2_0.pyi b/numpy/_expired_attrs_2_0.pyi new file mode 100644 index 000000000000..05c630c9b767 --- /dev/null +++ b/numpy/_expired_attrs_2_0.pyi @@ -0,0 +1,63 @@ +from typing import Final, TypedDict, final, type_check_only + +@final +@type_check_only +class _ExpiredAttributesType(TypedDict): + geterrobj: str + seterrobj: str + cast: str + source: str + lookfor: str + who: str + fastCopyAndTranspose: str + set_numeric_ops: str + NINF: str + PINF: str + NZERO: str + PZERO: str + add_newdoc: str + add_docstring: str + add_newdoc_ufunc: str + compat: str + safe_eval: str + float_: str + complex_: str + longfloat: str + singlecomplex: str + cfloat: str + longcomplex: str + clongfloat: str + string_: str + unicode_: str + Inf: str + Infinity: str + NaN: str + infty: str + issctype: str + maximum_sctype: str + obj2sctype: str + sctype2char: str + sctypes: str + issubsctype: str + set_string_function: str + asfarray: str + issubclass_: str + tracemalloc_domain: str + mat: str + recfromcsv: str + recfromtxt: str + deprecate: str + deprecate_with_doc: str + disp: str + find_common_type: str + round_: str + get_array_wrap: str + DataSource: str + nbytes: str + byte_bounds: str + compare_chararrays: str + format_parser: str + alltrue: str + sometrue: str + +__expired_attributes__: Final[_ExpiredAttributesType] = ... 
diff --git a/numpy/_globals.pyi b/numpy/_globals.pyi new file mode 100644 index 000000000000..c6b17d68d6b2 --- /dev/null +++ b/numpy/_globals.pyi @@ -0,0 +1,15 @@ +__all__ = ["_CopyMode", "_NoValue"] + +import enum +from typing import Final, final + +@final +class _CopyMode(enum.Enum): + ALWAYS = True + IF_NEEDED = False + NEVER = 2 + +@final +class _NoValueType: ... + +_NoValue: Final[_NoValueType] = ... diff --git a/numpy/_pytesttester.py b/numpy/_pytesttester.py index 4548fc6877ec..fe380dc828a5 100644 --- a/numpy/_pytesttester.py +++ b/numpy/_pytesttester.py @@ -74,6 +74,7 @@ class PytestTester: """ def __init__(self, module_name): self.module_name = module_name + self.__module__ = module_name def __call__(self, label='fast', verbose=1, extra_argv=None, doctests=False, coverage=False, durations=-1, tests=None): diff --git a/numpy/_pytesttester.pyi b/numpy/_pytesttester.pyi index 67ac87b33de1..f5db633fcd56 100644 --- a/numpy/_pytesttester.pyi +++ b/numpy/_pytesttester.pyi @@ -1,7 +1,7 @@ from collections.abc import Iterable from typing import Literal as L -__all__: list[str] +__all__ = ["PytestTester"] class PytestTester: module_name: str diff --git a/numpy/_typing/__init__.py b/numpy/_typing/__init__.py index 01c5a7c4cf78..dd9b133ddf88 100644 --- a/numpy/_typing/__init__.py +++ b/numpy/_typing/__init__.py @@ -2,96 +2,20 @@ from __future__ import annotations -from .. import ufunc -from .._utils import set_module -from typing import TYPE_CHECKING, final - - -@final # Disallow the creation of arbitrary `NBitBase` subclasses -@set_module("numpy.typing") -class NBitBase: - """ - A type representing `numpy.number` precision during static type checking. - - Used exclusively for the purpose static type checking, `NBitBase` - represents the base of a hierarchical set of subclasses. - Each subsequent subclass is herein used for representing a lower level - of precision, *e.g.* ``64Bit > 32Bit > 16Bit``. - - .. versionadded:: 1.20 - - Examples - -------- - Below is a typical usage example: `NBitBase` is herein used for annotating - a function that takes a float and integer of arbitrary precision - as arguments and returns a new float of whichever precision is largest - (*e.g.* ``np.float16 + np.int64 -> np.float64``). - - .. code-block:: python - - >>> from __future__ import annotations - >>> from typing import TypeVar, TYPE_CHECKING - >>> import numpy as np - >>> import numpy.typing as npt - - >>> T1 = TypeVar("T1", bound=npt.NBitBase) - >>> T2 = TypeVar("T2", bound=npt.NBitBase) - - >>> def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: - ... return a + b - - >>> a = np.float16() - >>> b = np.int64() - >>> out = add(a, b) - - >>> if TYPE_CHECKING: - ... reveal_locals() - ... # note: Revealed local types are: - ... # note: a: numpy.floating[numpy.typing._16Bit*] - ... # note: b: numpy.signedinteger[numpy.typing._64Bit*] - ... 
# note: out: numpy.floating[numpy.typing._64Bit*] - - """ - - def __init_subclass__(cls) -> None: - allowed_names = { - "NBitBase", "_256Bit", "_128Bit", "_96Bit", "_80Bit", - "_64Bit", "_32Bit", "_16Bit", "_8Bit", - } - if cls.__name__ not in allowed_names: - raise TypeError('cannot inherit from final class "NBitBase"') - super().__init_subclass__() - - -# Silence errors about subclassing a `@final`-decorated class -class _256Bit(NBitBase): # type: ignore[misc] - pass - -class _128Bit(_256Bit): # type: ignore[misc] - pass - -class _96Bit(_128Bit): # type: ignore[misc] - pass - -class _80Bit(_96Bit): # type: ignore[misc] - pass - -class _64Bit(_80Bit): # type: ignore[misc] - pass - -class _32Bit(_64Bit): # type: ignore[misc] - pass - -class _16Bit(_32Bit): # type: ignore[misc] - pass - -class _8Bit(_16Bit): # type: ignore[misc] - pass - - from ._nested_sequence import ( _NestedSequence as _NestedSequence, ) +from ._nbit_base import ( + NBitBase as NBitBase, + _8Bit as _8Bit, + _16Bit as _16Bit, + _32Bit as _32Bit, + _64Bit as _64Bit, + _80Bit as _80Bit, + _96Bit as _96Bit, + _128Bit as _128Bit, + _256Bit as _256Bit, +) from ._nbit import ( _NBitByte as _NBitByte, _NBitShort as _NBitShort, @@ -147,6 +71,17 @@ class _8Bit(_16Bit): # type: ignore[misc] _BytesCodes as _BytesCodes, _VoidCodes as _VoidCodes, _ObjectCodes as _ObjectCodes, + _StringCodes as _StringCodes, + _UnsignedIntegerCodes as _UnsignedIntegerCodes, + _SignedIntegerCodes as _SignedIntegerCodes, + _IntegerCodes as _IntegerCodes, + _FloatingCodes as _FloatingCodes, + _ComplexFloatingCodes as _ComplexFloatingCodes, + _InexactCodes as _InexactCodes, + _NumberCodes as _NumberCodes, + _CharacterCodes as _CharacterCodes, + _FlexibleCodes as _FlexibleCodes, + _GenericCodes as _GenericCodes, ) from ._scalars import ( _CharLike_co as _CharLike_co, @@ -186,15 +121,14 @@ class _8Bit(_16Bit): # type: ignore[misc] NDArray as NDArray, ArrayLike as ArrayLike, _ArrayLike as _ArrayLike, - _FiniteNestedSequence as _FiniteNestedSequence, - _SupportsArray as _SupportsArray, - _SupportsArrayFunc as _SupportsArrayFunc, _ArrayLikeInt as _ArrayLikeInt, _ArrayLikeBool_co as _ArrayLikeBool_co, _ArrayLikeUInt_co as _ArrayLikeUInt_co, _ArrayLikeInt_co as _ArrayLikeInt_co, _ArrayLikeFloat_co as _ArrayLikeFloat_co, + _ArrayLikeFloat64_co as _ArrayLikeFloat64_co, _ArrayLikeComplex_co as _ArrayLikeComplex_co, + _ArrayLikeComplex128_co as _ArrayLikeComplex128_co, _ArrayLikeNumber_co as _ArrayLikeNumber_co, _ArrayLikeTD64_co as _ArrayLikeTD64_co, _ArrayLikeDT64_co as _ArrayLikeDT64_co, @@ -202,23 +136,19 @@ class _8Bit(_16Bit): # type: ignore[misc] _ArrayLikeVoid_co as _ArrayLikeVoid_co, _ArrayLikeStr_co as _ArrayLikeStr_co, _ArrayLikeBytes_co as _ArrayLikeBytes_co, + _ArrayLikeString_co as _ArrayLikeString_co, + _ArrayLikeAnyString_co as _ArrayLikeAnyString_co, _ArrayLikeUnknown as _ArrayLikeUnknown, + _FiniteNestedSequence as _FiniteNestedSequence, + _SupportsArray as _SupportsArray, + _SupportsArrayFunc as _SupportsArrayFunc, _UnknownType as _UnknownType, ) -if TYPE_CHECKING: - from ._ufunc import ( - _UFunc_Nin1_Nout1 as _UFunc_Nin1_Nout1, - _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1, - _UFunc_Nin1_Nout2 as _UFunc_Nin1_Nout2, - _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2, - _GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1, - ) -else: - # Declare the (type-check-only) ufunc subclasses as ufunc aliases during - # runtime; this helps autocompletion tools such as Jedi (numpy/numpy#19834) - _UFunc_Nin1_Nout1 = ufunc - _UFunc_Nin2_Nout1 = ufunc - _UFunc_Nin1_Nout2 = ufunc - 
_UFunc_Nin2_Nout2 = ufunc - _GUFunc_Nin2_Nout1 = ufunc +from ._ufunc import ( + _UFunc_Nin1_Nout1 as _UFunc_Nin1_Nout1, + _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1, + _UFunc_Nin1_Nout2 as _UFunc_Nin1_Nout2, + _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2, + _GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1, +) diff --git a/numpy/_typing/_add_docstring.py b/numpy/_typing/_add_docstring.py index 758d1a5be5ea..68e362b6925f 100644 --- a/numpy/_typing/_add_docstring.py +++ b/numpy/_typing/_add_docstring.py @@ -120,8 +120,9 @@ def _parse_docstrings() -> str: add_newdoc('NDArray', repr(NDArray), """ - A `np.ndarray[Any, np.dtype[+ScalarType]] <numpy.ndarray>` type alias - :term:`generic <generic type>` w.r.t. its `dtype.type <numpy.dtype.type>`. + A `np.ndarray[tuple[int, ...], np.dtype[+ScalarType]] <numpy.ndarray>` + type alias :term:`generic <generic type>` w.r.t. its + `dtype.type <numpy.dtype.type>`. Can be used during runtime for typing arrays with a given dtype and unspecified shape. @@ -136,10 +137,10 @@ def _parse_docstrings() -> str: >>> import numpy.typing as npt >>> print(npt.NDArray) - numpy.ndarray[typing.Any, numpy.dtype[+_ScalarType_co]] + numpy.ndarray[tuple[int, ...], numpy.dtype[+_ScalarType_co]] >>> print(npt.NDArray[np.float64]) - numpy.ndarray[typing.Any, numpy.dtype[numpy.float64]] + numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.float64]] >>> NDArrayInt = npt.NDArray[np.int_] >>> a: NDArrayInt = np.arange(10) diff --git a/numpy/_typing/_array_like.py b/numpy/_typing/_array_like.py index 5cc501ab3ec5..7798e5d5d751 100644 --- a/numpy/_typing/_array_like.py +++ b/numpy/_typing/_array_like.py @@ -2,7 +2,7 @@ import sys from collections.abc import Collection, Callable, Sequence -from typing import Any, Protocol, TypeAlias, TypeVar, runtime_checkable +from typing import Any, Protocol, TypeAlias, TypeVar, runtime_checkable, TYPE_CHECKING import numpy as np from numpy import ( @@ -21,7 +21,16 @@ str_, bytes_, ) +from ._nbit_base import _32Bit, _64Bit from ._nested_sequence import _NestedSequence +from ._shape import _Shape + +if TYPE_CHECKING: + StringDType = np.dtypes.StringDType +else: + # at runtime outside of type checking importing this from numpy.dtypes + # would lead to a circular import + from numpy._core.multiarray import StringDType _T = TypeVar("_T") _ScalarType = TypeVar("_ScalarType", bound=generic) @@ -29,7 +38,7 @@ _DType = TypeVar("_DType", bound=dtype[Any]) _DType_co = TypeVar("_DType_co", covariant=True, bound=dtype[Any]) -NDArray: TypeAlias = ndarray[Any, dtype[_ScalarType_co]] +NDArray: TypeAlias = ndarray[_Shape, dtype[_ScalarType_co]] # The `_SupportsArray` protocol only cares about the default dtype # (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned @@ -79,17 +88,16 @@ def __array_function__( ) if sys.version_info >= (3, 12): - from collections.abc import Buffer - - ArrayLike: TypeAlias = Buffer | _DualArrayLike[ - dtype[Any], - bool | int | float | complex | str | bytes, - ] + from collections.abc import Buffer as _Buffer else: - ArrayLike: TypeAlias = _DualArrayLike[ - dtype[Any], - bool | int | float | complex | str | bytes, - ] + @runtime_checkable + class _Buffer(Protocol): + def __buffer__(self, flags: int, /) -> memoryview: ...
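Aside: `collections.abc.Buffer` only exists on Python 3.12+ (PEP 688), which is why the else branch above falls back to a structurally equivalent `runtime_checkable` protocol. A rough illustration of the three kinds of input accepted by the `ArrayLike` alias defined immediately below, under the assumption that plain `np.asarray` coercion is the behavior being modeled:

    # Illustration only: the kinds of objects ArrayLike is meant to cover.
    import numpy as np

    np.asarray(memoryview(b"\x00\x01"))   # buffer-protocol objects -> uint8 array
    np.asarray(3.14)                      # bare Python scalars
    np.asarray([[1, 2], [3, 4]])          # (nested) sequences of scalars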
+ +ArrayLike: TypeAlias = _Buffer | _DualArrayLike[ + dtype[Any], + bool | int | float | complex | str | bytes, +] # `ArrayLike_co`: array-like objects that can be coerced into `X` # given the casting rules `same_kind` @@ -147,6 +155,20 @@ def __array_function__( dtype[bytes_], bytes, ] +_ArrayLikeString_co: TypeAlias = _DualArrayLike[ + StringDType, + str +] +_ArrayLikeAnyString_co: TypeAlias = ( + _ArrayLikeStr_co | + _ArrayLikeBytes_co | + _ArrayLikeString_co +) + +__Float64_co: TypeAlias = np.floating[_64Bit] | np.float32 | np.float16 | np.integer | np.bool +__Complex128_co: TypeAlias = np.number[_64Bit] | np.number[_32Bit] | np.float16 | np.integer | np.bool +_ArrayLikeFloat64_co: TypeAlias = _DualArrayLike[dtype[__Float64_co], float | int] +_ArrayLikeComplex128_co: TypeAlias = _DualArrayLike[dtype[__Complex128_co], complex | float | int] # NOTE: This includes `builtins.bool`, but not `numpy.bool`. _ArrayLikeInt: TypeAlias = _DualArrayLike[ diff --git a/numpy/_typing/_callable.pyi b/numpy/_typing/_callable.pyi index 2dd2233665fc..75af1ae8efba 100644 --- a/numpy/_typing/_callable.pyi +++ b/numpy/_typing/_callable.pyi @@ -8,8 +8,6 @@ See the `Mypy documentation`_ on protocols for more details. """ -from __future__ import annotations - from typing import ( TypeAlias, TypeVar, @@ -18,12 +16,12 @@ from typing import ( Any, NoReturn, Protocol, + type_check_only, ) import numpy as np from numpy import ( generic, - timedelta64, number, integer, unsignedinteger, @@ -35,11 +33,10 @@ from numpy import ( complexfloating, complex128, ) -from ._nbit import _NBitInt, _NBitDouble +from ._nbit import _NBitInt from ._scalars import ( _BoolLike_co, _IntLike_co, - _FloatLike_co, _NumberLike_co, ) from . import NBitBase @@ -56,12 +53,13 @@ _2Tuple: TypeAlias = tuple[_T1, _T1] _NBit1 = TypeVar("_NBit1", bound=NBitBase) _NBit2 = TypeVar("_NBit2", bound=NBitBase) -_IntType = TypeVar("_IntType", bound=integer) -_FloatType = TypeVar("_FloatType", bound=floating) -_NumberType = TypeVar("_NumberType", bound=number) -_NumberType_co = TypeVar("_NumberType_co", covariant=True, bound=number) +_IntType = TypeVar("_IntType", bound=integer[Any]) +_FloatType = TypeVar("_FloatType", bound=floating[Any]) +_NumberType = TypeVar("_NumberType", bound=number[Any]) +_NumberType_co = TypeVar("_NumberType_co", covariant=True, bound=number[Any]) _GenericType_co = TypeVar("_GenericType_co", covariant=True, bound=generic) +@type_check_only class _BoolOp(Protocol[_GenericType_co]): @overload def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ... @@ -74,6 +72,7 @@ class _BoolOp(Protocol[_GenericType_co]): @overload def __call__(self, other: _NumberType, /) -> _NumberType: ... +@type_check_only class _BoolBitOp(Protocol[_GenericType_co]): @overload def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ... @@ -82,6 +81,7 @@ class _BoolBitOp(Protocol[_GenericType_co]): @overload def __call__(self, other: _IntType, /) -> _IntType: ... +@type_check_only class _BoolSub(Protocol): # Note that `other: bool` is absent here @overload @@ -95,6 +95,7 @@ class _BoolSub(Protocol): @overload def __call__(self, other: _NumberType, /) -> _NumberType: ... +@type_check_only class _BoolTrueDiv(Protocol): @overload def __call__(self, other: float | _IntLike_co, /) -> float64: ... @@ -103,6 +104,7 @@ class _BoolTrueDiv(Protocol): @overload def __call__(self, other: _NumberType, /) -> _NumberType: ... +@type_check_only class _BoolMod(Protocol): @overload def __call__(self, other: _BoolLike_co, /) -> int8: ... 
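Many hunks in this stub do nothing but add `typing.type_check_only`, so a one-off sketch of the pattern may help (the protocol name below is invented for illustration): the decorator marks a class as existing only during type checking, which is exactly right for callback protocols that have no runtime counterpart.

    # Invented example of the pattern applied throughout _callable.pyi.
    from typing import Protocol, overload, type_check_only

    @type_check_only
    class _SupportsHalve(Protocol):
        @overload
        def __call__(self, other: int, /) -> float: ...
        @overload
        def __call__(self, other: float, /) -> float: ...
    # _SupportsHalve may appear in annotations, but any attempt to reference
    # it at runtime is flagged by the type checker, since no runtime object
    # of that name is supposed to exist.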
@@ -115,59 +117,51 @@ class _BoolMod(Protocol): @overload def __call__(self, other: _FloatType, /) -> _FloatType: ... +@type_check_only class _BoolDivMod(Protocol): @overload def __call__(self, other: _BoolLike_co, /) -> _2Tuple[int8]: ... @overload # platform dependent def __call__(self, other: int, /) -> _2Tuple[int_]: ... @overload - def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ... + def __call__(self, other: float, /) -> _2Tuple[np.float64]: ... @overload def __call__(self, other: _IntType, /) -> _2Tuple[_IntType]: ... @overload def __call__(self, other: _FloatType, /) -> _2Tuple[_FloatType]: ... -class _TD64Div(Protocol[_NumberType_co]): - @overload - def __call__(self, other: timedelta64, /) -> _NumberType_co: ... - @overload - def __call__(self, other: _BoolLike_co, /) -> NoReturn: ... - @overload - def __call__(self, other: _FloatLike_co, /) -> timedelta64: ... - +@type_check_only class _IntTrueDiv(Protocol[_NBit1]): @overload def __call__(self, other: bool, /) -> floating[_NBit1]: ... @overload - def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ... + def __call__(self, other: int, /) -> floating[_NBit1] | floating[_NBitInt]: ... @overload - def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... + def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... @overload def __call__( - self, other: complex, /, - ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ... + self, other: complex, / + ) -> complexfloating[_NBit1, _NBit1] | complex128: ... @overload - def __call__(self, other: integer[_NBit2], /) -> floating[_NBit1 | _NBit2]: ... + def __call__( + self, other: integer[_NBit2], / + ) -> floating[_NBit1] | floating[_NBit2]: ... +@type_check_only class _UnsignedIntOp(Protocol[_NBit1]): # NOTE: `uint64 + signedinteger -> float64` @overload - def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... + def __call__(self, other: int, /) -> unsignedinteger[_NBit1]: ... @overload - def __call__( - self, other: int | signedinteger[Any], / - ) -> Any: ... + def __call__(self, other: float, /) -> float64: ... @overload - def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... + def __call__(self, other: complex, /) -> complex128: ... @overload - def __call__( - self, other: complex, /, - ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ... + def __call__(self, other: unsignedinteger[_NBit2], /) -> unsignedinteger[_NBit1] | unsignedinteger[_NBit2]: ... @overload - def __call__( - self, other: unsignedinteger[_NBit2], / - ) -> unsignedinteger[_NBit1 | _NBit2]: ... + def __call__(self, other: signedinteger, /) -> Any: ... +@type_check_only class _UnsignedIntBitOp(Protocol[_NBit1]): @overload def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... @@ -178,165 +172,152 @@ class _UnsignedIntBitOp(Protocol[_NBit1]): @overload def __call__( self, other: unsignedinteger[_NBit2], / - ) -> unsignedinteger[_NBit1 | _NBit2]: ... + ) -> unsignedinteger[_NBit1] | unsignedinteger[_NBit2]: ... +@type_check_only class _UnsignedIntMod(Protocol[_NBit1]): @overload def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... @overload - def __call__( - self, other: int | signedinteger[Any], / - ) -> Any: ... + def __call__(self, other: int | signedinteger[Any], /) -> Any: ... @overload - def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... + def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... 
@overload def __call__( self, other: unsignedinteger[_NBit2], / - ) -> unsignedinteger[_NBit1 | _NBit2]: ... + ) -> unsignedinteger[_NBit1] | unsignedinteger[_NBit2]: ... +@type_check_only class _UnsignedIntDivMod(Protocol[_NBit1]): @overload def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ... @overload - def __call__( - self, other: int | signedinteger[Any], / - ) -> _2Tuple[Any]: ... + def __call__(self, other: int | signedinteger[Any], /) -> _2Tuple[Any]: ... @overload - def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ... + def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1]] | _2Tuple[float64]: ... @overload def __call__( self, other: unsignedinteger[_NBit2], / - ) -> _2Tuple[unsignedinteger[_NBit1 | _NBit2]]: ... + ) -> _2Tuple[unsignedinteger[_NBit1]] | _2Tuple[unsignedinteger[_NBit2]]: ... +@type_check_only class _SignedIntOp(Protocol[_NBit1]): @overload - def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ... + def __call__(self, other: int, /) -> signedinteger[_NBit1]: ... @overload - def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... + def __call__(self, other: float, /) -> float64: ... @overload - def __call__( - self, other: complex, /, - ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ... + def __call__(self, other: complex, /) -> complex128: ... @overload - def __call__( - self, other: signedinteger[_NBit2], /, - ) -> signedinteger[_NBit1 | _NBit2]: ... + def __call__(self, other: signedinteger[_NBit2], /) -> signedinteger[_NBit1] | signedinteger[_NBit2]: ... +@type_check_only class _SignedIntBitOp(Protocol[_NBit1]): @overload def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... @overload - def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ... + def __call__(self, other: int, /) -> signedinteger[_NBit1] | int_: ... @overload def __call__( - self, other: signedinteger[_NBit2], /, - ) -> signedinteger[_NBit1 | _NBit2]: ... + self, other: signedinteger[_NBit2], / + ) -> signedinteger[_NBit1] | signedinteger[_NBit2]: ... +@type_check_only class _SignedIntMod(Protocol[_NBit1]): @overload def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... @overload - def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ... + def __call__(self, other: int, /) -> signedinteger[_NBit1] | int_: ... @overload - def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... + def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... @overload def __call__( - self, other: signedinteger[_NBit2], /, - ) -> signedinteger[_NBit1 | _NBit2]: ... + self, other: signedinteger[_NBit2], / + ) -> signedinteger[_NBit1] | signedinteger[_NBit2]: ... +@type_check_only class _SignedIntDivMod(Protocol[_NBit1]): @overload def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ... @overload - def __call__(self, other: int, /) -> _2Tuple[signedinteger[_NBit1 | _NBitInt]]: ... + def __call__(self, other: int, /) -> _2Tuple[signedinteger[_NBit1]] | _2Tuple[int_]: ... @overload - def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ... + def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1]] | _2Tuple[float64]: ... @overload def __call__( - self, other: signedinteger[_NBit2], /, - ) -> _2Tuple[signedinteger[_NBit1 | _NBit2]]: ... 
+ self, other: signedinteger[_NBit2], / + ) -> _2Tuple[signedinteger[_NBit1]] | _2Tuple[signedinteger[_NBit2]]: ... +@type_check_only class _FloatOp(Protocol[_NBit1]): @overload - def __call__(self, other: bool, /) -> floating[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ... + def __call__(self, other: int, /) -> floating[_NBit1]: ... @overload - def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... + def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... @overload def __call__( - self, other: complex, /, - ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ... + self, other: complex, / + ) -> complexfloating[_NBit1, _NBit1] | complex128: ... @overload def __call__( self, other: integer[_NBit2] | floating[_NBit2], / - ) -> floating[_NBit1 | _NBit2]: ... + ) -> floating[_NBit1] | floating[_NBit2]: ... +@type_check_only class _FloatMod(Protocol[_NBit1]): @overload def __call__(self, other: bool, /) -> floating[_NBit1]: ... @overload - def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ... + def __call__(self, other: int, /) -> floating[_NBit1] | floating[_NBitInt]: ... @overload - def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ... + def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... @overload def __call__( self, other: integer[_NBit2] | floating[_NBit2], / - ) -> floating[_NBit1 | _NBit2]: ... + ) -> floating[_NBit1] | floating[_NBit2]: ... class _FloatDivMod(Protocol[_NBit1]): @overload def __call__(self, other: bool, /) -> _2Tuple[floating[_NBit1]]: ... @overload - def __call__(self, other: int, /) -> _2Tuple[floating[_NBit1 | _NBitInt]]: ... - @overload - def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ... - @overload def __call__( - self, other: integer[_NBit2] | floating[_NBit2], / - ) -> _2Tuple[floating[_NBit1 | _NBit2]]: ... - -class _ComplexOp(Protocol[_NBit1]): - @overload - def __call__(self, other: bool, /) -> complexfloating[_NBit1, _NBit1]: ... - @overload - def __call__(self, other: int, /) -> complexfloating[_NBit1 | _NBitInt, _NBit1 | _NBitInt]: ... + self, other: int, / + ) -> _2Tuple[floating[_NBit1]] | _2Tuple[floating[_NBitInt]]: ... @overload def __call__( - self, other: complex, /, - ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ... + self, other: float, / + ) -> _2Tuple[floating[_NBit1]] | _2Tuple[float64]: ... @overload def __call__( - self, - other: ( - integer[_NBit2] - | floating[_NBit2] - | complexfloating[_NBit2, _NBit2] - ), /, - ) -> complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]: ... + self, other: integer[_NBit2] | floating[_NBit2], / + ) -> _2Tuple[floating[_NBit1]] | _2Tuple[floating[_NBit2]]: ... +@type_check_only class _NumberOp(Protocol): def __call__(self, other: _NumberLike_co, /) -> Any: ... @final +@type_check_only class _SupportsLT(Protocol): def __lt__(self, other: Any, /) -> Any: ... @final +@type_check_only class _SupportsLE(Protocol): def __le__(self, other: Any, /) -> Any: ... @final +@type_check_only class _SupportsGT(Protocol): def __gt__(self, other: Any, /) -> Any: ... @final +@type_check_only class _SupportsGE(Protocol): def __ge__(self, other: Any, /) -> Any: ... @final +@type_check_only class _ComparisonOpLT(Protocol[_T1_contra, _T2_contra]): @overload def __call__(self, other: _T1_contra, /) -> np.bool: ... 
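A recurring rewrite in the hunks above and below is `floating[_NBit1 | _NBit2]` becoming `floating[_NBit1] | floating[_NBit2]`. Presumably (the diff itself doesn't spell this out) the point is that a union *inside* the parameter describes a single scalar type whose precision argument is ambiguous and matches no concrete NumPy scalar, whereas a union *of* parameterizations is an ordinary union of two concrete types that overload resolution and narrowing handle predictably. A toy model with invented names:

    # Toy model of the rewrite; Bit32/Bit64/Floating are stand-ins,
    # not numpy's real types.
    from typing import Generic, TypeVar

    class Bit32: ...
    class Bit64(Bit32): ...

    N = TypeVar("N", bound=Bit32)

    class Floating(Generic[N]): ...

    # Before: one parameterization whose precision argument is a union.
    old_style: Floating[Bit32 | Bit64]
    # After: a union of two concrete parameterizations.
    new_style: Floating[Bit32] | Floating[Bit64]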
@@ -348,6 +329,7 @@ class _ComparisonOpLT(Protocol[_T1_contra, _T2_contra]): def __call__(self, other: _SupportsGT, /) -> np.bool: ... @final +@type_check_only class _ComparisonOpLE(Protocol[_T1_contra, _T2_contra]): @overload def __call__(self, other: _T1_contra, /) -> np.bool: ... @@ -359,6 +341,7 @@ class _ComparisonOpLE(Protocol[_T1_contra, _T2_contra]): def __call__(self, other: _SupportsGE, /) -> np.bool: ... @final +@type_check_only class _ComparisonOpGT(Protocol[_T1_contra, _T2_contra]): @overload def __call__(self, other: _T1_contra, /) -> np.bool: ... @@ -370,6 +353,7 @@ class _ComparisonOpGT(Protocol[_T1_contra, _T2_contra]): def __call__(self, other: _SupportsLT, /) -> np.bool: ... @final +@type_check_only class _ComparisonOpGE(Protocol[_T1_contra, _T2_contra]): @overload def __call__(self, other: _T1_contra, /) -> np.bool: ... diff --git a/numpy/_typing/_char_codes.py b/numpy/_typing/_char_codes.py index 1d36cc81e018..a14c01a513ba 100644 --- a/numpy/_typing/_char_codes.py +++ b/numpy/_typing/_char_codes.py @@ -139,3 +139,71 @@ "m8[fs]", "|m8[fs]", "=m8[fs]", "<m8[fs]", ">m8[fs]", "m8[as]", "|m8[as]", "=m8[as]", "<m8[as]", ">m8[as]", ] + +# NOTE: `StringDType` has no scalar type, and therefore has no name that can +# be passed to the `dtype` constructor +_StringCodes = Literal["T", "|T", "=T", "<T", ">T"] + +# NOTE: Nested literals get flattened and de-duplicated at runtime, which isn't +# the case for a `Union` of `Literal`s. +# So even though they're equivalent when type-checking, they differ at runtime. +# Another advantage of nesting is that they always have a "flat" +# `Literal.__args__`, which is a tuple of *literally* all its literal values. + +_UnsignedIntegerCodes = Literal[ + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _UIntCodes, + _UByteCodes, + _UShortCodes, + _UIntCCodes, + _ULongCodes, + _ULongLongCodes, +] +_SignedIntegerCodes = Literal[ + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _IntCodes, + _ByteCodes, + _ShortCodes, + _IntCCodes, + _LongCodes, + _LongLongCodes, +] +_FloatingCodes = Literal[ + _Float16Codes, + _Float32Codes, + _Float64Codes, + _HalfCodes, + _SingleCodes, + _DoubleCodes, + _LongDoubleCodes +] +_ComplexFloatingCodes = Literal[ + _Complex64Codes, + _Complex128Codes, + _CSingleCodes, + _CDoubleCodes, + _CLongDoubleCodes, +] +_IntegerCodes = Literal[_UnsignedIntegerCodes, _SignedIntegerCodes] +_InexactCodes = Literal[_FloatingCodes, _ComplexFloatingCodes] +_NumberCodes = Literal[_IntegerCodes, _InexactCodes] + +_CharacterCodes = Literal[_StrCodes, _BytesCodes] +_FlexibleCodes = Literal[_VoidCodes, _CharacterCodes] + +_GenericCodes = Literal[ + _BoolCodes, + _NumberCodes, + _FlexibleCodes, + _DT64Codes, + _TD64Codes, + _ObjectCodes, + # TODO: add `_StringCodes` once it has a scalar type + # _StringCodes, +] diff --git a/numpy/_typing/_dtype_like.py b/numpy/_typing/_dtype_like.py index b68b5337219d..4d08089081d6 100644 --- a/numpy/_typing/_dtype_like.py +++ b/numpy/_typing/_dtype_like.py @@ -1,4 +1,4 @@ -from collections.abc import Sequence +from collections.abc import Sequence # noqa: F811 from typing import ( Any, TypeAlias, @@ -129,7 +129,7 @@ def dtype(self) -> _DType_co: ... # NOTE: while it is possible to provide the dtype as a dict of # dtype-like objects (e.g. `{'field1': ..., 'field2': ..., ...}`), -# this syntax is officially discourged and +# this syntax is officially discouraged and # therefore not included in the type-union defining `DTypeLike`.
# # See https://github.com/numpy/numpy/issues/16891 for more details. @@ -144,9 +144,9 @@ def dtype(self) -> _DType_co: ... | _BoolCodes ) _DTypeLikeUInt: TypeAlias = ( - type[np.unsignedinteger] - | np.dtype[np.unsignedinteger] - | _SupportsDType[np.dtype[np.unsignedinteger]] + type[np.unsignedinteger[Any]] + | np.dtype[np.unsignedinteger[Any]] + | _SupportsDType[np.dtype[np.unsignedinteger[Any]]] | _UInt8Codes | _UInt16Codes | _UInt32Codes @@ -161,9 +161,9 @@ def dtype(self) -> _DType_co: ... ) _DTypeLikeInt: TypeAlias = ( type[int] - | type[np.signedinteger] - | np.dtype[np.signedinteger] - | _SupportsDType[np.dtype[np.signedinteger]] + | type[np.signedinteger[Any]] + | np.dtype[np.signedinteger[Any]] + | _SupportsDType[np.dtype[np.signedinteger[Any]]] | _Int8Codes | _Int16Codes | _Int32Codes @@ -178,9 +178,9 @@ def dtype(self) -> _DType_co: ... ) _DTypeLikeFloat: TypeAlias = ( type[float] - | type[np.floating] - | np.dtype[np.floating] - | _SupportsDType[np.dtype[np.floating]] + | type[np.floating[Any]] + | np.dtype[np.floating[Any]] + | _SupportsDType[np.dtype[np.floating[Any]]] | _Float16Codes | _Float32Codes | _Float64Codes @@ -191,9 +191,9 @@ def dtype(self) -> _DType_co: ... ) _DTypeLikeComplex: TypeAlias = ( type[complex] - | type[np.complexfloating] - | np.dtype[np.complexfloating] - | _SupportsDType[np.dtype[np.complexfloating]] + | type[np.complexfloating[Any]] + | np.dtype[np.complexfloating[Any]] + | _SupportsDType[np.dtype[np.complexfloating[Any]]] | _Complex64Codes | _Complex128Codes | _CSingleCodes diff --git a/numpy/_typing/_nbit.py b/numpy/_typing/_nbit.py index 7a4ca8837a2c..70cfdede8025 100644 --- a/numpy/_typing/_nbit.py +++ b/numpy/_typing/_nbit.py @@ -1,17 +1,19 @@ """A module with the precisions of platform-specific `~numpy.number`s.""" -from typing import Any +from typing import TypeAlias + +from ._nbit_base import _8Bit, _16Bit, _32Bit, _64Bit, _96Bit, _128Bit + # To-be replaced with a `npt.NBitBase` subclass by numpy's mypy plugin -_NBitByte = Any -_NBitShort = Any -_NBitIntC = Any -_NBitIntP = Any -_NBitInt = Any -_NBitLong = Any -_NBitLongLong = Any +_NBitByte: TypeAlias = _8Bit +_NBitShort: TypeAlias = _16Bit +_NBitIntC: TypeAlias = _32Bit +_NBitIntP: TypeAlias = _32Bit | _64Bit +_NBitInt: TypeAlias = _NBitIntP +_NBitLong: TypeAlias = _32Bit | _64Bit +_NBitLongLong: TypeAlias = _64Bit -_NBitHalf = Any -_NBitSingle = Any -_NBitDouble = Any -_NBitLongDouble = Any +_NBitHalf: TypeAlias = _16Bit +_NBitSingle: TypeAlias = _32Bit +_NBitDouble: TypeAlias = _64Bit +_NBitLongDouble: TypeAlias = _64Bit | _96Bit | _128Bit diff --git a/numpy/_typing/_nbit_base.py b/numpy/_typing/_nbit_base.py new file mode 100644 index 000000000000..4f764757c4ea --- /dev/null +++ b/numpy/_typing/_nbit_base.py @@ -0,0 +1,100 @@ +"""A module with the precisions of generic `~numpy.number` types.""" +from .._utils import set_module +from typing import final + + +@final # Disallow the creation of arbitrary `NBitBase` subclasses +@set_module("numpy.typing") +class NBitBase: + """ + A type representing `numpy.number` precision during static type checking. + + Used exclusively for the purpose of static type checking, `NBitBase` + represents the base of a hierarchical set of subclasses. + Each subsequent subclass is herein used for representing a lower level + of precision, *e.g.* ``64Bit > 32Bit > 16Bit``. + + .. 
versionadded:: 1.20 + + Examples + -------- + Below is a typical usage example: `NBitBase` is herein used for annotating + a function that takes a float and integer of arbitrary precision + as arguments and returns a new float of whichever precision is largest + (*e.g.* ``np.float16 + np.int64 -> np.float64``). + + .. code-block:: python + + >>> from __future__ import annotations + >>> from typing import TypeVar, TYPE_CHECKING + >>> import numpy as np + >>> import numpy.typing as npt + + >>> S = TypeVar("S", bound=npt.NBitBase) + >>> T = TypeVar("T", bound=npt.NBitBase) + + >>> def add(a: np.floating[S], b: np.integer[T]) -> np.floating[S | T]: + ... return a + b + + >>> a = np.float16() + >>> b = np.int64() + >>> out = add(a, b) + + >>> if TYPE_CHECKING: + ... reveal_locals() + ... # note: Revealed local types are: + ... # note: a: numpy.floating[numpy.typing._16Bit*] + ... # note: b: numpy.signedinteger[numpy.typing._64Bit*] + ... # note: out: numpy.floating[numpy.typing._64Bit*] + + """ + + def __init_subclass__(cls) -> None: + allowed_names = { + "NBitBase", "_256Bit", "_128Bit", "_96Bit", "_80Bit", + "_64Bit", "_32Bit", "_16Bit", "_8Bit", + } + if cls.__name__ not in allowed_names: + raise TypeError('cannot inherit from final class "NBitBase"') + super().__init_subclass__() + +@final +@set_module("numpy._typing") +# Silence errors about subclassing a `@final`-decorated class +class _256Bit(NBitBase): # type: ignore[misc] + pass + +@final +@set_module("numpy._typing") +class _128Bit(_256Bit): # type: ignore[misc] + pass + +@final +@set_module("numpy._typing") +class _96Bit(_128Bit): # type: ignore[misc] + pass + +@final +@set_module("numpy._typing") +class _80Bit(_96Bit): # type: ignore[misc] + pass + +@final +@set_module("numpy._typing") +class _64Bit(_80Bit): # type: ignore[misc] + pass + +@final +@set_module("numpy._typing") +class _32Bit(_64Bit): # type: ignore[misc] + pass + +@final +@set_module("numpy._typing") +class _16Bit(_32Bit): # type: ignore[misc] + pass + +@final +@set_module("numpy._typing") +class _8Bit(_16Bit): # type: ignore[misc] + pass diff --git a/numpy/_typing/_nested_sequence.py b/numpy/_typing/_nested_sequence.py index 3d0d25ae5b48..23667fd46d89 100644 --- a/numpy/_typing/_nested_sequence.py +++ b/numpy/_typing/_nested_sequence.py @@ -2,14 +2,17 @@ from __future__ import annotations -from collections.abc import Iterator from typing import ( Any, TypeVar, Protocol, runtime_checkable, + TYPE_CHECKING, ) +if TYPE_CHECKING: + from collections.abc import Iterator + __all__ = ["_NestedSequence"] _T_co = TypeVar("_T_co", covariant=True) diff --git a/numpy/_typing/_ufunc.py b/numpy/_typing/_ufunc.py new file mode 100644 index 000000000000..d0573c8f5463 --- /dev/null +++ b/numpy/_typing/_ufunc.py @@ -0,0 +1,7 @@ +from .. import ufunc + +_UFunc_Nin1_Nout1 = ufunc +_UFunc_Nin2_Nout1 = ufunc +_UFunc_Nin1_Nout2 = ufunc +_UFunc_Nin2_Nout2 = ufunc +_GUFunc_Nin2_Nout1 = ufunc diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index 5e52039864b7..b5ac0ff635dd 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -4,39 +4,54 @@ The signatures of the ufuncs are too varied to reasonably type with a single class. So instead, `ufunc` has been expanded into four private subclasses, one for each combination of `~ufunc.nin` and `~ufunc.nout`. 
- """ from typing import ( Any, Generic, - overload, - TypeVar, Literal, - SupportsIndex, - Protocol, NoReturn, + Protocol, + SupportsIndex, + TypeAlias, + TypedDict, + TypeVar, + overload, + type_check_only, ) -from numpy import ufunc, _CastingKind, _OrderKACF +from typing_extensions import LiteralString, Unpack + +import numpy as np +from numpy import _CastingKind, _OrderKACF, ufunc from numpy.typing import NDArray -from ._shape import _ShapeLike -from ._scalars import _ScalarLike_co from ._array_like import ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co from ._dtype_like import DTypeLike +from ._scalars import _ScalarLike_co +from ._shape import _ShapeLike _T = TypeVar("_T") -_2Tuple = tuple[_T, _T] -_3Tuple = tuple[_T, _T, _T] -_4Tuple = tuple[_T, _T, _T, _T] +_2Tuple: TypeAlias = tuple[_T, _T] +_3Tuple: TypeAlias = tuple[_T, _T, _T] +_4Tuple: TypeAlias = tuple[_T, _T, _T, _T] + +_2PTuple: TypeAlias = tuple[_T, _T, Unpack[tuple[_T, ...]]] +_3PTuple: TypeAlias = tuple[_T, _T, _T, Unpack[tuple[_T, ...]]] +_4PTuple: TypeAlias = tuple[_T, _T, _T, _T, Unpack[tuple[_T, ...]]] _NTypes = TypeVar("_NTypes", bound=int, covariant=True) -_IDType = TypeVar("_IDType", bound=Any, covariant=True) -_NameType = TypeVar("_NameType", bound=str, covariant=True) -_Signature = TypeVar("_Signature", bound=str, covariant=True) +_IDType = TypeVar("_IDType", covariant=True) +_NameType = TypeVar("_NameType", bound=LiteralString, covariant=True) +_Signature = TypeVar("_Signature", bound=LiteralString, covariant=True) +_NIn = TypeVar("_NIn", bound=int, covariant=True) +_NOut = TypeVar("_NOut", bound=int, covariant=True) +_ReturnType_co = TypeVar("_ReturnType_co", covariant=True) +_ArrayType = TypeVar("_ArrayType", bound=np.ndarray[Any, Any]) + +@type_check_only class _SupportsArrayUFunc(Protocol): def __array_ufunc__( self, @@ -46,6 +61,13 @@ class _SupportsArrayUFunc(Protocol): **kwargs: Any, ) -> Any: ... +@type_check_only +class _UFunc3Kwargs(TypedDict, total=False): + where: _ArrayLikeBool_co | None + casting: _CastingKind + order: _OrderKACF + subok: bool + signature: _3Tuple[str | None] | str | None # NOTE: `reduce`, `accumulate`, `reduceat` and `outer` raise a ValueError for # ufuncs that don't accept two input arguments and return one output argument. @@ -57,10 +79,15 @@ class _SupportsArrayUFunc(Protocol): # NOTE: If 2 output types are returned then `out` must be a # 2-tuple of arrays. Otherwise `None` or a plain array are also acceptable +# pyright: reportIncompatibleMethodOverride=false + +@type_check_only class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @property + def __qualname__(self) -> _NameType: ... + @property def ntypes(self) -> _NTypes: ... @property def identity(self) -> _IDType: ... @@ -125,11 +152,13 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def reduceat(self, *args, **kwargs) -> NoReturn: ... def outer(self, *args, **kwargs) -> NoReturn: ... - +@type_check_only class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @property + def __qualname__(self) -> _NameType: ... + @property def ntypes(self) -> _NTypes: ... @property def identity(self) -> _IDType: ... @@ -142,34 +171,61 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @property def signature(self) -> None: ... 
- @overload + @overload # (scalar, scalar) -> scalar def __call__( self, - __x1: _ScalarLike_co, - __x2: _ScalarLike_co, - out: None = ..., + x1: _ScalarLike_co, + x2: _ScalarLike_co, + /, + out: None = None, *, - where: None | _ArrayLikeBool_co = ..., - casting: _CastingKind = ..., - order: _OrderKACF = ..., - dtype: DTypeLike = ..., - subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], ) -> Any: ... - @overload + @overload # (array-like, array) -> array def __call__( self, - __x1: ArrayLike, - __x2: ArrayLike, - out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + x1: ArrayLike, + x2: NDArray[np.generic], + /, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, *, - where: None | _ArrayLikeBool_co = ..., - casting: _CastingKind = ..., - order: _OrderKACF = ..., - dtype: DTypeLike = ..., - subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], ) -> NDArray[Any]: ... + @overload # (array, array-like) -> array + def __call__( + self, + x1: NDArray[np.generic], + x2: ArrayLike, + /, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... + @overload # (array-like, array-like, out=array) -> array + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: NDArray[np.generic] | tuple[NDArray[np.generic]], + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... + @overload # (array-like, array-like) -> array | scalar + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any] | Any: ... def at( self, @@ -207,40 +263,69 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i out: None | NDArray[Any] = ..., ) -> NDArray[Any]: ... - # Expand `**kwargs` into explicit keyword-only arguments - @overload + @overload # (scalar, scalar) -> scalar def outer( self, A: _ScalarLike_co, B: _ScalarLike_co, - /, *, - out: None = ..., - where: None | _ArrayLikeBool_co = ..., - casting: _CastingKind = ..., - order: _OrderKACF = ..., - dtype: DTypeLike = ..., - subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., + /, + *, + out: None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], ) -> Any: ... - @overload - def outer( # type: ignore[misc] + @overload # (array-like, array) -> array + def outer( self, A: ArrayLike, + B: NDArray[np.generic], + /, + *, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... + @overload # (array, array-like) -> array + def outer( + self, + A: NDArray[np.generic], B: ArrayLike, - /, *, - out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., - where: None | _ArrayLikeBool_co = ..., - casting: _CastingKind = ..., - order: _OrderKACF = ..., - dtype: DTypeLike = ..., - subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., + /, + *, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... 
+ @overload # (array-like, array-like, out=array) -> array + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, + *, + out: NDArray[np.generic] | tuple[NDArray[np.generic]], + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], ) -> NDArray[Any]: ... + @overload # (array-like, array-like) -> array | scalar + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, + *, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any] | Any: ... +@type_check_only class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @property + def __qualname__(self) -> _NameType: ... + @property def ntypes(self) -> _NTypes: ... @property def identity(self) -> _IDType: ... @@ -304,10 +389,13 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def reduceat(self, *args, **kwargs) -> NoReturn: ... def outer(self, *args, **kwargs) -> NoReturn: ... +@type_check_only class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @property + def __qualname__(self) -> _NameType: ... + @property def ntypes(self) -> _NTypes: ... @property def identity(self) -> _IDType: ... @@ -358,10 +446,13 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def reduceat(self, *args, **kwargs) -> NoReturn: ... def outer(self, *args, **kwargs) -> NoReturn: ... +@type_check_only class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @property + def __qualname__(self) -> _NameType: ... + @property def ntypes(self) -> _NTypes: ... @property def identity(self) -> _IDType: ... @@ -409,3 +500,443 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] def accumulate(self, *args, **kwargs) -> NoReturn: ... def reduceat(self, *args, **kwargs) -> NoReturn: ... def outer(self, *args, **kwargs) -> NoReturn: ... + +@type_check_only +class _PyFunc_Kwargs_Nargs2(TypedDict, total=False): + where: None | _ArrayLikeBool_co + casting: _CastingKind + order: _OrderKACF + dtype: DTypeLike + subok: bool + signature: str | tuple[DTypeLike, DTypeLike] + +@type_check_only +class _PyFunc_Kwargs_Nargs3(TypedDict, total=False): + where: None | _ArrayLikeBool_co + casting: _CastingKind + order: _OrderKACF + dtype: DTypeLike + subok: bool + signature: str | tuple[DTypeLike, DTypeLike, DTypeLike] + +@type_check_only +class _PyFunc_Kwargs_Nargs3P(TypedDict, total=False): + where: None | _ArrayLikeBool_co + casting: _CastingKind + order: _OrderKACF + dtype: DTypeLike + subok: bool + signature: str | _3PTuple[DTypeLike] + +@type_check_only +class _PyFunc_Kwargs_Nargs4P(TypedDict, total=False): + where: None | _ArrayLikeBool_co + casting: _CastingKind + order: _OrderKACF + dtype: DTypeLike + subok: bool + signature: str | _4PTuple[DTypeLike] + +@type_check_only +class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: ignore[misc] + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[1]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[2]: ... + @property + def ntypes(self) -> Literal[1]: ... + @property + def signature(self) -> None: ... 
+ + @overload + def __call__( + self, + x1: _ScalarLike_co, + /, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], + ) -> _ReturnType_co: ... + @overload + def __call__( + self, + x1: ArrayLike, + /, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], + ) -> _ReturnType_co | NDArray[np.object_]: ... + @overload + def __call__( + self, + x1: ArrayLike, + /, + out: _ArrayType | tuple[_ArrayType], + **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], + ) -> _ArrayType: ... + @overload + def __call__( + self, + x1: _SupportsArrayUFunc, + /, + out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], + ) -> Any: ... + + def at(self, a: _SupportsArrayUFunc, ixs: _ArrayLikeInt_co, /) -> None: ... + def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def outer(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + +@type_check_only +class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: ignore[misc] + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def ntypes(self) -> Literal[1]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + x2: _ScalarLike_co, + /, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ReturnType_co: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ReturnType_co | NDArray[np.object_]: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: _ArrayType | tuple[_ArrayType], + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ArrayType: ... + @overload + def __call__( + self, + x1: _SupportsArrayUFunc, + x2: _SupportsArrayUFunc | ArrayLike, + /, + out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> Any: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: _SupportsArrayUFunc, + /, + out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> Any: ... + + def at(self, a: _SupportsArrayUFunc, ixs: _ArrayLikeInt_co, b: ArrayLike, /) -> None: ... + + @overload + def reduce( + self, + array: ArrayLike, + axis: None | _ShapeLike, + dtype: DTypeLike, + out: _ArrayType, + /, + keepdims: bool = ..., + initial: _ScalarLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> _ArrayType: ... + @overload + def reduce( + self, + /, + array: ArrayLike, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayType | tuple[_ArrayType], + keepdims: bool = ..., + initial: _ScalarLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> _ArrayType: ... + @overload + def reduce( + self, + /, + array: ArrayLike, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: None = ..., + *, + keepdims: Literal[True], + initial: _ScalarLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> NDArray[np.object_]: ... + @overload + def reduce( + self, + /, + array: ArrayLike, + axis: None | _ShapeLike = ..., + dtype: DTypeLike = ..., + out: None = ..., + keepdims: bool = ..., + initial: _ScalarLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> _ReturnType_co | NDArray[np.object_]: ... 
+ + @overload + def reduceat( + self, + array: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex, + dtype: DTypeLike, + out: _ArrayType, + /, + ) -> _ArrayType: ... + @overload + def reduceat( + self, + /, + array: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayType | tuple[_ArrayType], + ) -> _ArrayType: ... + @overload + def reduceat( + self, + /, + array: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., + ) -> NDArray[np.object_]: ... + @overload + def reduceat( + self, + /, + array: _SupportsArrayUFunc, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + ) -> Any: ... + + @overload + def accumulate( + self, + array: ArrayLike, + axis: SupportsIndex, + dtype: DTypeLike, + out: _ArrayType, + /, + ) -> _ArrayType: ... + @overload + def accumulate( + self, + array: ArrayLike, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayType | tuple[_ArrayType], + ) -> _ArrayType: ... + @overload + def accumulate( + self, + /, + array: ArrayLike, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., + ) -> NDArray[np.object_]: ... + + @overload + def outer( + self, + A: _ScalarLike_co, + B: _ScalarLike_co, + /, *, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ReturnType_co: ... + @overload + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, *, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ReturnType_co | NDArray[np.object_]: ... + @overload + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, *, + out: _ArrayType, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ArrayType: ... + @overload + def outer( + self, + A: _SupportsArrayUFunc, + B: _SupportsArrayUFunc | ArrayLike, + /, *, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> Any: ... + @overload + def outer( + self, + A: _ScalarLike_co, + B: _SupportsArrayUFunc | ArrayLike, + /, *, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> Any: ... + +@type_check_only +class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # type: ignore[misc] + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> _NIn: ... + @property + def nout(self) -> Literal[1]: ... + @property + def ntypes(self) -> Literal[1]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + x2: _ScalarLike_co, + x3: _ScalarLike_co, + /, + *xs: _ScalarLike_co, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], + ) -> _ReturnType_co: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + x3: ArrayLike, + /, + *xs: ArrayLike, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], + ) -> _ReturnType_co | NDArray[np.object_]: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + x3: ArrayLike, + /, + *xs: ArrayLike, + out: _ArrayType | tuple[_ArrayType], + **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], + ) -> _ArrayType: ... + @overload + def __call__( + self, + x1: _SupportsArrayUFunc | ArrayLike, + x2: _SupportsArrayUFunc | ArrayLike, + x3: _SupportsArrayUFunc | ArrayLike, + /, + *xs: _SupportsArrayUFunc | ArrayLike, + out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], + ) -> Any: ... 
+ + def at(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def outer(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + +@type_check_only +class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]): # type: ignore[misc] + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> _NIn: ... + @property + def nout(self) -> _NOut: ... + @property + def ntypes(self) -> Literal[1]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + /, + *xs: _ScalarLike_co, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], + ) -> _2PTuple[_ReturnType_co]: ... + @overload + def __call__( + self, + x1: ArrayLike, + /, + *xs: ArrayLike, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], + ) -> _2PTuple[_ReturnType_co | NDArray[np.object_]]: ... + @overload + def __call__( + self, + x1: ArrayLike, + /, + *xs: ArrayLike, + out: _2PTuple[_ArrayType], + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], + ) -> _2PTuple[_ArrayType]: ... + @overload + def __call__( + self, + x1: _SupportsArrayUFunc | ArrayLike, + /, + *xs: _SupportsArrayUFunc | ArrayLike, + out: None | _2PTuple[NDArray[Any]] = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], + ) -> Any: ... + + def at(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def outer(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... diff --git a/numpy/_utils/_inspect.py b/numpy/_utils/_inspect.py index 9a874a71dd0a..c8805dddc014 100644 --- a/numpy/_utils/_inspect.py +++ b/numpy/_utils/_inspect.py @@ -54,7 +54,7 @@ def iscode(object): co_nlocals number of local variables co_stacksize virtual machine stack space required co_varnames tuple of names of arguments and local variables - + """ return isinstance(object, types.CodeType) @@ -117,7 +117,7 @@ def getargvalues(frame): 'args' is a list of the argument names (it may contain nested lists). 'varargs' and 'varkw' are the names of the * and ** arguments or None. 'locals' is the locals dictionary of the given frame. 
- + """ args, varargs, varkw = getargs(frame.f_code) return args, varargs, varkw, frame.f_locals diff --git a/numpy/char/__init__.pyi b/numpy/char/__init__.pyi index 3a98cbb42ecc..2abf86d305f8 100644 --- a/numpy/char/__init__.pyi +++ b/numpy/char/__init__.pyi @@ -1,57 +1,111 @@ from numpy._core.defchararray import ( - equal as equal, - not_equal as not_equal, - greater_equal as greater_equal, - less_equal as less_equal, - greater as greater, - less as less, - str_len as str_len, - add as add, - multiply as multiply, - mod as mod, - capitalize as capitalize, - center as center, - count as count, - decode as decode, - encode as encode, - endswith as endswith, - expandtabs as expandtabs, - find as find, - index as index, - isalnum as isalnum, - isalpha as isalpha, - isdigit as isdigit, - islower as islower, - isspace as isspace, - istitle as istitle, - isupper as isupper, - join as join, - ljust as ljust, - lower as lower, - lstrip as lstrip, - partition as partition, - replace as replace, - rfind as rfind, - rindex as rindex, - rjust as rjust, - rpartition as rpartition, - rsplit as rsplit, - rstrip as rstrip, - split as split, - splitlines as splitlines, - startswith as startswith, - strip as strip, - swapcase as swapcase, - title as title, - translate as translate, - upper as upper, - zfill as zfill, - isnumeric as isnumeric, - isdecimal as isdecimal, - array as array, - asarray as asarray, - compare_chararrays as compare_chararrays, - chararray as chararray + equal, + not_equal, + greater_equal, + less_equal, + greater, + less, + str_len, + add, + multiply, + mod, + capitalize, + center, + count, + decode, + encode, + endswith, + expandtabs, + find, + index, + isalnum, + isalpha, + isdigit, + islower, + isspace, + istitle, + isupper, + join, + ljust, + lower, + lstrip, + partition, + replace, + rfind, + rindex, + rjust, + rpartition, + rsplit, + rstrip, + split, + splitlines, + startswith, + strip, + swapcase, + title, + translate, + upper, + zfill, + isnumeric, + isdecimal, + array, + asarray, + compare_chararrays, + chararray ) -__all__: list[str] +__all__ = [ + "equal", + "not_equal", + "greater_equal", + "less_equal", + "greater", + "less", + "str_len", + "add", + "multiply", + "mod", + "capitalize", + "center", + "count", + "decode", + "encode", + "endswith", + "expandtabs", + "find", + "index", + "isalnum", + "isalpha", + "isdigit", + "islower", + "isspace", + "istitle", + "isupper", + "join", + "ljust", + "lower", + "lstrip", + "partition", + "replace", + "rfind", + "rindex", + "rjust", + "rpartition", + "rsplit", + "rstrip", + "split", + "splitlines", + "startswith", + "strip", + "swapcase", + "title", + "translate", + "upper", + "zfill", + "isnumeric", + "isdecimal", + "array", + "asarray", + "compare_chararrays", + "chararray", +] diff --git a/numpy/compat/py3k.py b/numpy/compat/py3k.py index d02c9f8fe341..74870e8ad954 100644 --- a/numpy/compat/py3k.py +++ b/numpy/compat/py3k.py @@ -119,8 +119,6 @@ def npy_load_module(name, fn, info=None): 3.12. An alternative that uses ``exec_module`` is in numpy.distutils.misc_util.exec_mod_from_location - .. versionadded:: 1.11.2 - Parameters ---------- name : str diff --git a/numpy/conftest.py b/numpy/conftest.py index 677537e206f0..0eb42d1103e4 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -2,6 +2,7 @@ Pytest configuration and fixtures for the Numpy test suite. 
""" import os +import string import sys import tempfile from contextlib import contextmanager @@ -10,9 +11,11 @@ import hypothesis import pytest import numpy +import numpy as np from numpy._core._multiarray_tests import get_fpu_mode -from numpy.testing._private.utils import NOGIL_BUILD +from numpy._core.tests._natype import pd_NA +from numpy.testing._private.utils import NOGIL_BUILD, get_stringdtype_dtype try: from scipy_doctest.conftest import dt_config @@ -32,7 +35,7 @@ # We register two custom profiles for Numpy - for details see # https://hypothesis.readthedocs.io/en/latest/settings.html -# The first is designed for our own CI runs; the latter also +# The first is designed for our own CI runs; the latter also # forces determinism and is designed for use via np.test() hypothesis.settings.register_profile( name="numpy-profile", deadline=None, print_blob=True, @@ -42,8 +45,8 @@ deadline=None, print_blob=True, database=None, derandomize=True, suppress_health_check=list(hypothesis.HealthCheck), ) -# Note that the default profile is chosen based on the presence -# of pytest.ini, but can be overridden by passing the +# Note that the default profile is chosen based on the presence +# of pytest.ini, but can be overridden by passing the # --hypothesis-profile=NAME argument to pytest. _pytest_ini = os.path.join(os.path.dirname(__file__), "..", "pytest.ini") hypothesis.settings.load_profile( @@ -150,23 +153,6 @@ def env_setup(monkeypatch): monkeypatch.setenv('PYTHONHASHSEED', '0') -@pytest.fixture(params=[True, False]) -def weak_promotion(request): - """ - Fixture to ensure "legacy" promotion state or change it to use the new - weak promotion (plus warning). `old_promotion` should be used as a - parameter in the function. - """ - state = numpy._get_promotion_state() - if request.param: - numpy._set_promotion_state("weak_and_warn") - else: - numpy._set_promotion_state("legacy") - - yield request.param - numpy._set_promotion_state(state) - - if HAVE_SCPDT: @contextmanager @@ -211,6 +197,9 @@ def warnings_errors_and_rng(test=None): dt_config.rndm_markers.add('#uninitialized') dt_config.rndm_markers.add('# uninitialized') + # make the checker pick on mismatched dtypes + dt_config.strict_check = True + import doctest dt_config.optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS @@ -218,12 +207,12 @@ def warnings_errors_and_rng(test=None): dt_config.check_namespace['StringDType'] = numpy.dtypes.StringDType # temporary skips - dt_config.skiplist = set([ + dt_config.skiplist = { 'numpy.savez', # unclosed file 'numpy.matlib.savez', 'numpy.__array_namespace_info__', 'numpy.matlib.__array_namespace_info__', - ]) + } # xfail problematic tutorials dt_config.pytest_extra_xfail = { @@ -245,3 +234,28 @@ def warnings_errors_and_rng(test=None): 'numpy/f2py/_backends/_distutils.py', ] + +@pytest.fixture +def random_string_list(): + chars = list(string.ascii_letters + string.digits) + chars = np.array(chars, dtype="U1") + ret = np.random.choice(chars, size=100 * 10, replace=True) + return ret.view("U100") + + +@pytest.fixture(params=[True, False]) +def coerce(request): + return request.param + + +@pytest.fixture( + params=["unset", None, pd_NA, np.nan, float("nan"), "__nan__"], + ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"], +) +def na_object(request): + return request.param + + +@pytest.fixture() +def dtype(na_object, coerce): + return get_stringdtype_dtype(na_object, coerce) diff --git a/numpy/core/_utils.py b/numpy/core/_utils.py index ad076b0315f1..5f47f4ba46f8 100644 --- 
a/numpy/core/_utils.py +++ b/numpy/core/_utils.py @@ -1,7 +1,7 @@ import warnings -def _raise_warning(attr: str, submodule: str = None) -> None: +def _raise_warning(attr: str, submodule: str | None = None) -> None: new_module = "numpy._core" old_module = "numpy.core" if submodule is not None: @@ -16,6 +16,6 @@ def _raise_warning(attr: str, submodule: str = None) -> None: "use the public NumPy API. If not, you are using NumPy internals. " "If you would still like to access an internal attribute, " f"use {new_module}.{attr}.", - DeprecationWarning, + DeprecationWarning, stacklevel=3 ) diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py index ea94ad30852e..f607773444c0 100644 --- a/numpy/ctypeslib.py +++ b/numpy/ctypeslib.py @@ -53,9 +53,7 @@ 'as_ctypes_type'] import os -from numpy import ( - integer, ndarray, dtype as _dtype, asarray, frombuffer -) +import numpy as np from numpy._core.multiarray import _flagdict, flagsobj try: @@ -181,7 +179,7 @@ def _flags_fromnum(num): class _ndptr(_ndptr_base): @classmethod def from_param(cls, obj): - if not isinstance(obj, ndarray): + if not isinstance(obj, np.ndarray): raise TypeError("argument must be an ndarray") if cls._dtype_ is not None \ and obj.dtype != cls._dtype_: @@ -221,10 +219,10 @@ def contents(self): This mirrors the `contents` attribute of a normal ctypes pointer """ - full_dtype = _dtype((self._dtype_, self._shape_)) + full_dtype = np.dtype((self._dtype_, self._shape_)) full_ctype = ctypes.c_char * full_dtype.itemsize buffer = ctypes.cast(self, ctypes.POINTER(full_ctype)).contents - return frombuffer(buffer, dtype=full_dtype).squeeze(axis=0) + return np.frombuffer(buffer, dtype=full_dtype).squeeze(axis=0) # Factory for an array-checking class with from_param defined for @@ -282,16 +280,16 @@ def ndpointer(dtype=None, ndim=None, shape=None, flags=None): """ - # normalize dtype to an Optional[dtype] + # normalize dtype to dtype | None if dtype is not None: - dtype = _dtype(dtype) + dtype = np.dtype(dtype) - # normalize flags to an Optional[int] + # normalize flags to int | None num = None if flags is not None: if isinstance(flags, str): flags = flags.split(',') - elif isinstance(flags, (int, integer)): + elif isinstance(flags, (int, np.integer)): num = flags flags = _flags_fromnum(num) elif isinstance(flags, flagsobj): @@ -304,7 +302,7 @@ def ndpointer(dtype=None, ndim=None, shape=None, flags=None): raise TypeError("invalid flags specification") from e num = _num_fromflags(flags) - # normalize shape to an Optional[tuple] + # normalize shape to tuple | None if shape is not None: try: shape = tuple(shape) @@ -368,7 +366,7 @@ def _get_scalar_type_map(): ct.c_float, ct.c_double, ct.c_bool, ] - return {_dtype(ctype): ctype for ctype in simple_types} + return {np.dtype(ctype): ctype for ctype in simple_types} _scalar_type_map = _get_scalar_type_map() @@ -516,7 +514,7 @@ def as_ctypes_type(dtype): """ - return _ctype_from_dtype(_dtype(dtype)) + return _ctype_from_dtype(np.dtype(dtype)) def as_array(obj, shape=None): @@ -527,6 +525,26 @@ def as_array(obj, shape=None): The shape parameter must be given if converting from a ctypes POINTER. 
The shape parameter is ignored if converting from a ctypes array + + Examples + -------- + Converting a ctypes integer array: + + >>> import ctypes + >>> ctypes_array = (ctypes.c_int * 5)(0, 1, 2, 3, 4) + >>> np_array = np.ctypeslib.as_array(ctypes_array) + >>> np_array + array([0, 1, 2, 3, 4], dtype=int32) + + Converting a ctypes POINTER: + + >>> import ctypes + >>> buffer = (ctypes.c_int * 5)(0, 1, 2, 3, 4) + >>> pointer = ctypes.cast(buffer, ctypes.POINTER(ctypes.c_int)) + >>> np_array = np.ctypeslib.as_array(pointer, (5,)) + >>> np_array + array([0, 1, 2, 3, 4], dtype=int32) + """ if isinstance(obj, ctypes._Pointer): # convert pointers to an array of the desired shape @@ -537,12 +555,35 @@ def as_array(obj, shape=None): p_arr_type = ctypes.POINTER(_ctype_ndarray(obj._type_, shape)) obj = ctypes.cast(obj, p_arr_type).contents - return asarray(obj) + return np.asarray(obj) def as_ctypes(obj): - """Create and return a ctypes object from a numpy array. Actually - anything that exposes the __array_interface__ is accepted.""" + """ + Create and return a ctypes object from a numpy array. Actually + anything that exposes the __array_interface__ is accepted. + + Examples + -------- + Create ctypes object from inferred int ``np.array``: + + >>> inferred_int_array = np.array([1, 2, 3]) + >>> c_int_array = np.ctypeslib.as_ctypes(inferred_int_array) + >>> type(c_int_array) + + >>> c_int_array[:] + [1, 2, 3] + + Create ctypes object from explicit 8 bit unsigned int ``np.array`` : + + >>> exp_int_array = np.array([1, 2, 3], dtype=np.uint8) + >>> c_int_array = np.ctypeslib.as_ctypes(exp_int_array) + >>> type(c_int_array) + + >>> c_int_array[:] + [1, 2, 3] + + """ ai = obj.__array_interface__ if ai["strides"]: raise TypeError("strided arrays not supported") diff --git a/numpy/ctypeslib.pyi b/numpy/ctypeslib.pyi index ce8854ca13c1..fd5d99451071 100644 --- a/numpy/ctypeslib.pyi +++ b/numpy/ctypeslib.pyi @@ -1,13 +1,14 @@ # NOTE: Numpy's mypy plugin is used for importing the correct # platform-specific `ctypes._SimpleCData[int]` sub-type +import ctypes from ctypes import c_int64 as _c_intp -import os -import ctypes +from _typeshed import StrOrBytesPath from collections.abc import Iterable, Sequence from typing import ( Literal as L, Any, + TypeAlias, TypeVar, Generic, overload, @@ -24,13 +25,11 @@ from numpy import ( intc, long, longlong, - intp, ubyte, ushort, uintc, ulong, ulonglong, - uintp, single, double, longdouble, @@ -44,6 +43,7 @@ from numpy._typing import ( _ArrayLike, # Shapes + _Shape, _ShapeLike, # DTypes @@ -66,12 +66,14 @@ from numpy._typing import ( _LongDoubleCodes, ) +__all__ = ["load_library", "ndpointer", "c_intp", "as_ctypes", "as_array", "as_ctypes_type"] + # TODO: Add a proper `_Shape` bound once we've got variadic typevars _DType = TypeVar("_DType", bound=dtype[Any]) _DTypeOptional = TypeVar("_DTypeOptional", bound=None | dtype[Any]) _SCT = TypeVar("_SCT", bound=generic) -_FlagsKind = L[ +_FlagsKind: TypeAlias = L[ 'C_CONTIGUOUS', 'CONTIGUOUS', 'C', 'F_CONTIGUOUS', 'FORTRAN', 'F', 'ALIGNED', 'A', @@ -100,14 +102,9 @@ class _concrete_ndptr(_ndptr[_DType]): _dtype_: ClassVar[_DType] _shape_: ClassVar[tuple[int, ...]] @property - def contents(self) -> ndarray[Any, _DType]: ... - -def load_library( - libname: str | bytes | os.PathLike[str] | os.PathLike[bytes], - loader_path: str | bytes | os.PathLike[str] | os.PathLike[bytes], -) -> ctypes.CDLL: ... + def contents(self) -> ndarray[_Shape, _DType]: ... 
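Alongside the doctests the patch itself adds for `as_array` and `as_ctypes`, here is a hedged sketch of the `ndpointer` factory whose dtype/flags normalization is reworked above (purely illustrative; no actual foreign library is involved):

    import numpy as np

    # a validator for "1-D, float64, C-contiguous" ctypes arguments
    arg_t = np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,
                                   flags="C_CONTIGUOUS")

    arg_t.from_param(np.zeros(4))           # accepted
    try:
        arg_t.from_param(np.zeros((2, 2)))  # wrong ndim
    except TypeError:
        pass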
-__all__: list[str] +def load_library(libname: StrOrBytesPath, loader_path: StrOrBytesPath) -> ctypes.CDLL: ... c_intp = _c_intp diff --git a/numpy/distutils/command/config_compiler.py b/numpy/distutils/command/config_compiler.py index 44265bfcce89..ca4099886d8c 100644 --- a/numpy/distutils/command/config_compiler.py +++ b/numpy/distutils/command/config_compiler.py @@ -57,7 +57,7 @@ def initialize_options(self): self.noarch = None def finalize_options(self): - log.info('unifing config_fc, config, build_clib, build_ext, build commands --fcompiler options') + log.info('unifying config_fc, config, build_clib, build_ext, build commands --fcompiler options') build_clib = self.get_finalized_command('build_clib') build_ext = self.get_finalized_command('build_ext') config = self.get_finalized_command('config') @@ -98,7 +98,7 @@ def initialize_options(self): self.compiler = None def finalize_options(self): - log.info('unifing config_cc, config, build_clib, build_ext, build commands --compiler options') + log.info('unifying config_cc, config, build_clib, build_ext, build commands --compiler options') build_clib = self.get_finalized_command('build_clib') build_ext = self.get_finalized_command('build_ext') config = self.get_finalized_command('config') diff --git a/numpy/distutils/exec_command.py b/numpy/distutils/exec_command.py index a67453abf624..2d06585a1497 100644 --- a/numpy/distutils/exec_command.py +++ b/numpy/distutils/exec_command.py @@ -306,7 +306,7 @@ def _quote_arg(arg): """ Quote the argument for safe use in a shell command line. """ - # If there is a quote in the string, assume relevants parts of the + # If there is a quote in the string, assume relevant parts of the # string are already quoted (e.g. '-I"C:\\Program Files\\..."') if '"' not in arg and ' ' in arg: return '"%s"' % arg diff --git a/numpy/distutils/intelccompiler.py b/numpy/distutils/intelccompiler.py index 0fa1c11dd676..77fb39889a29 100644 --- a/numpy/distutils/intelccompiler.py +++ b/numpy/distutils/intelccompiler.py @@ -37,12 +37,7 @@ def __init__(self, verbose=0, dry_run=0, force=0): class IntelItaniumCCompiler(IntelCCompiler): compiler_type = 'intele' - - # On Itanium, the Intel Compiler used to be called ecc, let's search for - # it (now it's also icc, so ecc is last in the search). - for cc_exe in map(find_executable, ['icc', 'ecc']): - if cc_exe: - break + cc_exe = 'icc' class IntelEM64TCCompiler(UnixCCompiler): diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py index 4763f41ad326..2599a9e9a807 100644 --- a/numpy/distutils/mingw32ccompiler.py +++ b/numpy/distutils/mingw32ccompiler.py @@ -24,7 +24,13 @@ import distutils.cygwinccompiler from distutils.unixccompiler import UnixCCompiler -from distutils.msvccompiler import get_build_version as get_build_msvc_version + +try: + from distutils.msvccompiler import get_build_version as get_build_msvc_version +except ImportError: + def get_build_msvc_version(): + return None + from distutils.errors import UnknownFileError from numpy.distutils.misc_util import (msvc_runtime_library, msvc_runtime_version, @@ -184,7 +190,7 @@ def find_python_dll(): # - find it in the virtualenv (sys.prefix) # - find it in python main dir (sys.base_prefix, if in a virtualenv) # - in system32, - # - ortherwise (Sxs), I don't know how to get it. + # - otherwise (Sxs), I don't know how to get it. 
stems = [sys.prefix] if sys.base_prefix != sys.prefix: stems.append(sys.base_prefix) diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index edf56909ab5d..64785481b617 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 """ This file defines a set of system_info classes for getting information about various resources (libraries, library directories, diff --git a/numpy/distutils/tests/test_build_ext.py b/numpy/distutils/tests/test_build_ext.py index 55e134b2a047..7124cc407a2f 100644 --- a/numpy/distutils/tests/test_build_ext.py +++ b/numpy/distutils/tests/test_build_ext.py @@ -56,7 +56,7 @@ def configuration(parent_package="", top_path=None): from numpy.distutils.core import setup setup(**configuration(top_path="").todict())''')) - # build the test extensino and "install" into a temporary directory + # build the test extension and "install" into a temporary directory build_dir = tmp_path subprocess.check_call([sys.executable, 'setup.py', 'build', 'install', '--prefix', str(tmp_path / 'installdir'), diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index 706e538c8bea..5cb345035f2c 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -5,10 +5,10 @@ from typing import ( Literal as L, NoReturn, TypeAlias, - TypeVar, final, + type_check_only, ) -from typing_extensions import LiteralString +from typing_extensions import LiteralString, Self, TypeVar import numpy as np @@ -50,12 +50,12 @@ __all__ = [ # Helper base classes (typing-only) -_SelfT = TypeVar("_SelfT", bound=np.dtype[Any]) _SCT_co = TypeVar("_SCT_co", bound=np.generic, covariant=True) +@type_check_only class _SimpleDType(Generic[_SCT_co], np.dtype[_SCT_co]): # type: ignore[misc] names: None # pyright: ignore[reportIncompatibleVariableOverride] - def __new__(cls: type[_SelfT], /) -> _SelfT: ... + def __new__(cls, /) -> Self: ... def __getitem__(self, key: Any, /) -> NoReturn: ... @property def base(self) -> np.dtype[_SCT_co]: ... @@ -72,7 +72,8 @@ class _SimpleDType(Generic[_SCT_co], np.dtype[_SCT_co]): # type: ignore[misc] @property def subdtype(self) -> None: ... -class _LiteralDType(Generic[_SCT_co], _SimpleDType[_SCT_co]): +@type_check_only +class _LiteralDType(Generic[_SCT_co], _SimpleDType[_SCT_co]): # type: ignore[misc] @property def flags(self) -> L[0]: ... @property @@ -84,6 +85,7 @@ _KindT_co = TypeVar("_KindT_co", bound=LiteralString, covariant=True) _CharT_co = TypeVar("_CharT_co", bound=LiteralString, covariant=True) _NumT_co = TypeVar("_NumT_co", bound=int, covariant=True) +@type_check_only class _TypeCodes(Generic[_KindT_co, _CharT_co, _NumT_co]): @final @property @@ -95,19 +97,22 @@ class _TypeCodes(Generic[_KindT_co, _CharT_co, _NumT_co]): @property def num(self) -> _NumT_co: ... +@type_check_only class _NoOrder: @final @property def byteorder(self) -> L["|"]: ... +@type_check_only class _NativeOrder: @final @property def byteorder(self) -> L["="]: ... _DataSize_co = TypeVar("_DataSize_co", bound=int, covariant=True) -_ItemSize_co = TypeVar("_ItemSize_co", bound=int, covariant=True) +_ItemSize_co = TypeVar("_ItemSize_co", bound=int, covariant=True, default=int) +@type_check_only class _NBit(Generic[_DataSize_co, _ItemSize_co]): @final @property @@ -116,12 +121,13 @@ class _NBit(Generic[_DataSize_co, _ItemSize_co]): @property def itemsize(self) -> _ItemSize_co: ... +@type_check_only class _8Bit(_NoOrder, _NBit[L[1], L[1]]): ... 
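These `@type_check_only` helper classes only pin down, for type checkers, attributes the concrete dtype classes below already expose at runtime. For instance `Float64DType` (declared further below) combines `_TypeCodes[L["f"], L["d"], L[12]]`, `_NativeOrder`, and `_NBit[L[8], L[8]]`, which a quick check confirms:

    import numpy as np

    dt = np.dtypes.Float64DType()   # the same dtype as np.dtype(np.float64)
    assert (dt.kind, dt.char, dt.num) == ("f", "d", 12)
    assert dt.byteorder == "=" and dt.itemsize == 8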
# Boolean: @final -class BoolDType( +class BoolDType( # type: ignore[misc] _TypeCodes[L["b"], L["?"], L[0]], _8Bit, _LiteralDType[np.bool], @@ -134,7 +140,7 @@ class BoolDType( # Sized integers: @final -class Int8DType( +class Int8DType( # type: ignore[misc] _TypeCodes[L["i"], L["b"], L[1]], _8Bit, _LiteralDType[np.int8], @@ -145,7 +151,7 @@ class Int8DType( def str(self) -> L["|i1"]: ... @final -class UInt8DType( +class UInt8DType( # type: ignore[misc] _TypeCodes[L["u"], L["B"], L[2]], _8Bit, _LiteralDType[np.uint8], @@ -156,7 +162,7 @@ class UInt8DType( def str(self) -> L["|u1"]: ... @final -class Int16DType( +class Int16DType( # type: ignore[misc] _TypeCodes[L["i"], L["h"], L[3]], _NativeOrder, _NBit[L[2], L[2]], @@ -168,7 +174,7 @@ class Int16DType( def str(self) -> L["i2"]: ... @final -class UInt16DType( +class UInt16DType( # type: ignore[misc] _TypeCodes[L["u"], L["H"], L[4]], _NativeOrder, _NBit[L[2], L[2]], @@ -180,7 +186,7 @@ class UInt16DType( def str(self) -> L["u2"]: ... @final -class Int32DType( +class Int32DType( # type: ignore[misc] _TypeCodes[L["i"], L["i", "l"], L[5, 7]], _NativeOrder, _NBit[L[4], L[4]], @@ -192,7 +198,7 @@ class Int32DType( def str(self) -> L["i4"]: ... @final -class UInt32DType( +class UInt32DType( # type: ignore[misc] _TypeCodes[L["u"], L["I", "L"], L[6, 8]], _NativeOrder, _NBit[L[4], L[4]], @@ -204,7 +210,7 @@ class UInt32DType( def str(self) -> L["u4"]: ... @final -class Int64DType( +class Int64DType( # type: ignore[misc] _TypeCodes[L["i"], L["l", "q"], L[7, 9]], _NativeOrder, _NBit[L[8], L[8]], @@ -216,7 +222,7 @@ class Int64DType( def str(self) -> L["i8"]: ... @final -class UInt64DType( +class UInt64DType( # type: ignore[misc] _TypeCodes[L["u"], L["L", "Q"], L[8, 10]], _NativeOrder, _NBit[L[8], L[8]], @@ -234,7 +240,7 @@ ShortDType: Final = Int16DType UShortDType: Final = UInt16DType @final -class IntDType( +class IntDType( # type: ignore[misc] _TypeCodes[L["i"], L["i"], L[5]], _NativeOrder, _NBit[L[4], L[4]], @@ -246,7 +252,7 @@ class IntDType( def str(self) -> L["i4"]: ... @final -class UIntDType( +class UIntDType( # type: ignore[misc] _TypeCodes[L["u"], L["I"], L[6]], _NativeOrder, _NBit[L[4], L[4]], @@ -258,7 +264,7 @@ class UIntDType( def str(self) -> L["u4"]: ... @final -class LongDType( +class LongDType( # type: ignore[misc] _TypeCodes[L["i"], L["l"], L[7]], _NativeOrder, _NBit[L[4, 8], L[4, 8]], @@ -270,7 +276,7 @@ class LongDType( def str(self) -> L["i4", "i8"]: ... @final -class ULongDType( +class ULongDType( # type: ignore[misc] _TypeCodes[L["u"], L["L"], L[8]], _NativeOrder, _NBit[L[4, 8], L[4, 8]], @@ -282,7 +288,7 @@ class ULongDType( def str(self) -> L["u4", "u8"]: ... @final -class LongLongDType( +class LongLongDType( # type: ignore[misc] _TypeCodes[L["i"], L["q"], L[9]], _NativeOrder, _NBit[L[8], L[8]], @@ -294,7 +300,7 @@ class LongLongDType( def str(self) -> L["i8"]: ... @final -class ULongLongDType( +class ULongLongDType( # type: ignore[misc] _TypeCodes[L["u"], L["Q"], L[10]], _NativeOrder, _NBit[L[8], L[8]], @@ -308,7 +314,7 @@ class ULongLongDType( # Floats: @final -class Float16DType( +class Float16DType( # type: ignore[misc] _TypeCodes[L["f"], L["e"], L[23]], _NativeOrder, _NBit[L[2], L[2]], @@ -320,7 +326,7 @@ class Float16DType( def str(self) -> L["f2"]: ... @final -class Float32DType( +class Float32DType( # type: ignore[misc] _TypeCodes[L["f"], L["f"], L[11]], _NativeOrder, _NBit[L[4], L[4]], @@ -332,7 +338,7 @@ class Float32DType( def str(self) -> L["f4"]: ... 
@final -class Float64DType( +class Float64DType( # type: ignore[misc] _TypeCodes[L["f"], L["d"], L[12]], _NativeOrder, _NBit[L[8], L[8]], @@ -344,7 +350,7 @@ class Float64DType( def str(self) -> L["f8"]: ... @final -class LongDoubleDType( +class LongDoubleDType( # type: ignore[misc] _TypeCodes[L["f"], L["g"], L[13]], _NativeOrder, _NBit[L[8, 12, 16], L[8, 12, 16]], @@ -358,7 +364,7 @@ class LongDoubleDType( # Complex: @final -class Complex64DType( +class Complex64DType( # type: ignore[misc] _TypeCodes[L["c"], L["F"], L[14]], _NativeOrder, _NBit[L[4], L[8]], @@ -370,7 +376,7 @@ class Complex64DType( def str(self) -> L["c8"]: ... @final -class Complex128DType( +class Complex128DType( # type: ignore[misc] _TypeCodes[L["c"], L["D"], L[15]], _NativeOrder, _NBit[L[8], L[16]], @@ -382,7 +388,7 @@ class Complex128DType( def str(self) -> L["c16"]: ... @final -class CLongDoubleDType( +class CLongDoubleDType( # type: ignore[misc] _TypeCodes[L["c"], L["G"], L[16]], _NativeOrder, _NBit[L[8, 12, 16], L[16, 24, 32]], @@ -396,7 +402,7 @@ class CLongDoubleDType( # Python objects: @final -class ObjectDType( +class ObjectDType( # type: ignore[misc] _TypeCodes[L["O"], L["O"], L[17]], _NoOrder, _NBit[L[8], L[8]], @@ -412,7 +418,7 @@ class ObjectDType( # Flexible: @final -class BytesDType( +class BytesDType( # type: ignore[misc] Generic[_ItemSize_co], _TypeCodes[L["S"], L["S"], L[18]], _NoOrder, @@ -428,7 +434,7 @@ class BytesDType( def str(self) -> LiteralString: ... @final -class StrDType( +class StrDType( # type: ignore[misc] Generic[_ItemSize_co], _TypeCodes[L["U"], L["U"], L[19]], _NativeOrder, @@ -444,17 +450,17 @@ class StrDType( def str(self) -> LiteralString: ... @final -class VoidDType( +class VoidDType( # type: ignore[misc] Generic[_ItemSize_co], _TypeCodes[L["V"], L["V"], L[20]], _NoOrder, _NBit[L[1], _ItemSize_co], - np.dtype[np.void], # type: ignore[misc] + np.dtype[np.void], ): # NOTE: `VoidDType(...)` raises a `TypeError` at the moment def __new__(cls, length: _ItemSize_co, /) -> NoReturn: ... @property - def base(self: _SelfT) -> _SelfT: ... + def base(self) -> Self: ... @property def isalignedstruct(self) -> L[False]: ... @property @@ -477,7 +483,7 @@ _TimeUnit: TypeAlias = L["h", "m", "s", "ms", "us", "ns", "ps", "fs", "as"] _DateTimeUnit: TypeAlias = _DateUnit | _TimeUnit @final -class DateTime64DType( +class DateTime64DType( # type: ignore[misc] _TypeCodes[L["M"], L["M"], L[21]], _NativeOrder, _NBit[L[8], L[8]], @@ -522,7 +528,7 @@ class DateTime64DType( ]: ... @final -class TimeDelta64DType( +class TimeDelta64DType( # type: ignore[misc] _TypeCodes[L["m"], L["m"], L[22]], _NativeOrder, _NBit[L[8], L[8]], @@ -567,12 +573,12 @@ class TimeDelta64DType( ]: ... @final -class StringDType( +class StringDType( # type: ignore[misc] _TypeCodes[L["T"], L["T"], L[2056]], _NativeOrder, _NBit[L[8], L[16]], # TODO: Replace the (invalid) `str` with the scalar type, once implemented - np.dtype[str], # type: ignore[misc] + np.dtype[str], # type: ignore[type-var] ): def __new__(cls, /) -> StringDType: ... def __getitem__(self, key: Any, /) -> NoReturn: ... @@ -597,4 +603,4 @@ class StringDType( @property def subdtype(self) -> None: ... @property - def type(self) -> type[str]: ... + def type(self) -> type[str]: ... # type: ignore[valid-type] diff --git a/numpy/exceptions.py b/numpy/exceptions.py index adf88c754b66..9bf74fc4d0a3 100644 --- a/numpy/exceptions.py +++ b/numpy/exceptions.py @@ -86,9 +86,9 @@ class VisibleDeprecationWarning(UserWarning): class RankWarning(RuntimeWarning): """Matrix rank warning. 
- + Issued by polynomial functions when the design matrix is rank deficient. - + """ pass @@ -117,8 +117,6 @@ class AxisError(ValueError, IndexError): ``except ValueError`` and ``except IndexError`` statements continue to catch ``AxisError``. - .. versionadded:: 1.13 - Parameters ---------- axis : int or str diff --git a/numpy/exceptions.pyi b/numpy/exceptions.pyi index 8a99713f7006..7caa96c4673c 100644 --- a/numpy/exceptions.pyi +++ b/numpy/exceptions.pyi @@ -1,6 +1,13 @@ from typing import overload -__all__: list[str] +__all__ = [ + "ComplexWarning", + "VisibleDeprecationWarning", + "ModuleDeprecationWarning", + "TooHardError", + "AxisError", + "DTypePromotionError", +] class ComplexWarning(RuntimeWarning): ... class ModuleDeprecationWarning(DeprecationWarning): ... @@ -16,4 +23,3 @@ class AxisError(ValueError, IndexError): def __init__(self, axis: str, ndim: None = ..., msg_prefix: None = ...) -> None: ... @overload def __init__(self, axis: int, ndim: int, msg_prefix: None | str = ...) -> None: ... - def __str__(self) -> str: ... diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py index dfb897671c3f..8bf1d637ec0c 100644 --- a/numpy/f2py/__init__.py +++ b/numpy/f2py/__init__.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 """Fortran to Python Interface Generator. Copyright 1999 -- 2011 Pearu Peterson all rights reserved. diff --git a/numpy/f2py/__init__.pyi b/numpy/f2py/__init__.pyi index 81b6a24f39ec..9cf1247f7797 100644 --- a/numpy/f2py/__init__.pyi +++ b/numpy/f2py/__init__.pyi @@ -1,30 +1,29 @@ -import os +from _typeshed import StrOrBytesPath import subprocess from collections.abc import Iterable -from typing import Literal as L, Any, overload, TypedDict +from typing import Literal as L, overload, TypedDict, type_check_only -from numpy._pytesttester import PytestTester +__all__ = ["run_main", "get_include"] +@type_check_only class _F2PyDictBase(TypedDict): csrc: list[str] h: list[str] +@type_check_only class _F2PyDict(_F2PyDictBase, total=False): fsrc: list[str] ltx: list[str] -__all__: list[str] -test: PytestTester - def run_main(comline_list: Iterable[str]) -> dict[str, _F2PyDict]: ... @overload -def compile( # type: ignore[misc] +def compile( source: str | bytes, modulename: str = ..., extra_args: str | list[str] = ..., verbose: bool = ..., - source_fn: None | str | bytes | os.PathLike[Any] = ..., + source_fn: StrOrBytesPath | None = ..., extension: L[".f", ".f90"] = ..., full_output: L[False] = ..., ) -> int: ... @@ -34,9 +33,10 @@ def compile( modulename: str = ..., extra_args: str | list[str] = ..., verbose: bool = ..., - source_fn: None | str | bytes | os.PathLike[Any] = ..., + source_fn: StrOrBytesPath | None = ..., extension: L[".f", ".f90"] = ..., - full_output: L[True] = ..., + *, + full_output: L[True], ) -> subprocess.CompletedProcess[bytes]: ... def get_include() -> str: ... 
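To make the reworked `compile` overloads above concrete, a hedged sketch (running it for real requires a Fortran compiler; the module name and source are illustrative):

    import numpy.f2py as f2py

    fsrc = "      subroutine hi\n      print *, 'hello'\n      end\n"

    # default overload: returns an int exit status
    rc = f2py.compile(fsrc, modulename="hi", verbose=False)

    # full_output is now keyword-only and selects the second overload,
    # which returns a subprocess.CompletedProcess[bytes]
    proc = f2py.compile(fsrc, modulename="hi", verbose=False,
                        full_output=True)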
diff --git a/numpy/f2py/_backends/_distutils.py b/numpy/f2py/_backends/_distutils.py index f2436f86a7e6..aa7680a07ff9 100644 --- a/numpy/f2py/_backends/_distutils.py +++ b/numpy/f2py/_backends/_distutils.py @@ -42,7 +42,7 @@ def compile(self): i = get_info(n) if not i: print( - f"No {repr(n)} resources found" + f"No {n!r} resources found" "in system (try `f2py --help-link`)" ) dict_append(ext_args, **i) diff --git a/numpy/f2py/_backends/_meson.py b/numpy/f2py/_backends/_meson.py index b438ed223433..9195e51f02fd 100644 --- a/numpy/f2py/_backends/_meson.py +++ b/numpy/f2py/_backends/_meson.py @@ -12,7 +12,6 @@ from string import Template from itertools import chain -import warnings class MesonTemplate: @@ -118,7 +117,7 @@ def include_substitution(self) -> None: def fortran_args_substitution(self) -> None: if self.fortran_args: self.substitutions["fortran_args"] = ( - f"{self.indent}fortran_args: [{', '.join([arg for arg in self.fortran_args])}]," + f"{self.indent}fortran_args: [{', '.join(list(self.fortran_args))}]," ) else: self.substitutions["fortran_args"] = "" diff --git a/numpy/f2py/_src_pyf.py b/numpy/f2py/_src_pyf.py index 6247b95bfe46..ce59a35fed3d 100644 --- a/numpy/f2py/_src_pyf.py +++ b/numpy/f2py/_src_pyf.py @@ -1,3 +1,4 @@ +import os import re # START OF CODE VENDORED FROM `numpy.distutils.from_template` diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index 68b56c5a640c..e926a52d1b51 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -13,7 +13,6 @@ import re import types from functools import reduce -from copy import deepcopy from . import __version__ from . import cfuncs @@ -27,7 +26,7 @@ 'hasexternals', 'hasinitvalue', 'hasnote', 'hasresultnote', 'isallocatable', 'isarray', 'isarrayofstrings', 'ischaracter', 'ischaracterarray', 'ischaracter_or_characterarray', - 'iscomplex', + 'iscomplex', 'iscstyledirective', 'iscomplexarray', 'iscomplexfunction', 'iscomplexfunction_warn', 'isdouble', 'isdummyroutine', 'isexternal', 'isfunction', 'isfunction_wrap', 'isint1', 'isint1array', 'isinteger', 'isintent_aux', @@ -36,16 +35,15 @@ 'isintent_nothide', 'isintent_out', 'isintent_overwrite', 'islogical', 'islogicalfunction', 'islong_complex', 'islong_double', 'islong_doublefunction', 'islong_long', 'islong_longfunction', - 'ismodule', 'ismoduleroutine', 'isoptional', 'isprivate', 'isrequired', - 'isroutine', 'isscalar', 'issigned_long_longarray', 'isstring', - 'isstringarray', 'isstring_or_stringarray', 'isstringfunction', - 'issubroutine', 'get_f2py_modulename', - 'issubroutine_wrap', 'isthreadsafe', 'isunsigned', 'isunsigned_char', - 'isunsigned_chararray', 'isunsigned_long_long', - 'isunsigned_long_longarray', 'isunsigned_short', - 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', - 'replace', 'show', 'stripcomma', 'throw_error', 'isattr_value', - 'getuseblocks', 'process_f2cmap_dict' + 'ismodule', 'ismoduleroutine', 'isoptional', 'isprivate', 'isvariable', + 'isrequired', 'isroutine', 'isscalar', 'issigned_long_longarray', + 'isstring', 'isstringarray', 'isstring_or_stringarray', 'isstringfunction', + 'issubroutine', 'get_f2py_modulename', 'issubroutine_wrap', 'isthreadsafe', + 'isunsigned', 'isunsigned_char', 'isunsigned_chararray', + 'isunsigned_long_long', 'isunsigned_long_longarray', 'isunsigned_short', + 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', 'replace', + 'show', 'stripcomma', 'throw_error', 'isattr_value', 'getuseblocks', + 'process_f2cmap_dict', 'containscommon' ] @@ -425,6 +423,11 @@ def isrequired(var): return 
not isoptional(var) and isintent_nothide(var) +def iscstyledirective(f2py_line): + directives = {"callstatement", "callprotoargument", "pymethoddef"} + return any(directive in f2py_line.lower() for directive in directives) + + def isintent_in(var): if 'intent' not in var: return 1 @@ -518,6 +521,15 @@ def isprivate(var): return 'attrspec' in var and 'private' in var['attrspec'] +def isvariable(var): + # heuristic to find public/private declarations of filtered subroutines + if len(var) == 1 and 'attrspec' in var and \ + var['attrspec'][0] in ('public', 'private'): + is_var = False + else: + is_var = True + return is_var + def hasinitvalue(var): return '=' in var diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py index 8a8939d7260a..83e5b1ba945a 100644 --- a/numpy/f2py/capi_maps.py +++ b/numpy/f2py/capi_maps.py @@ -627,7 +627,7 @@ def routsign2map(rout): ln = k break lcb_map[ln] = un[1] - elif 'externals' in rout and rout['externals']: + elif rout.get('externals'): errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n' % ( ret['name'], repr(rout['externals']))) ret['callprotoargument'] = getcallprotoargument(rout, lcb_map) or '' @@ -798,7 +798,7 @@ def cb_routsign2map(rout, um): return ret -def common_sign2map(a, var): # obsolute +def common_sign2map(a, var): # obsolete ret = {'varname': a, 'ctype': getctype(var)} if isstringarray(var): ret['ctype'] = 'char' diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index 1dc3247323d5..6856416fd04a 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 """ C declarations, CPP macros, and C functions for f2py2e. Only required declarations/macros/functions will be used. @@ -549,24 +548,32 @@ def errmess(s: str) -> None: #error You need to install NumPy version 0.13 or higher. See https://scipy.org/install.html #endif """ + +# Defining the correct value to indicate thread-local storage in C without +# running a compile-time check (which we have no control over in generated +# code used outside of NumPy) is hard. Therefore we support overriding this +# via an external define - the f2py-using package can then use the same +# compile-time checks as we use for `NPY_TLS` when building NumPy (see +# scipy#21860 for an example of that). +# +# __STDC_NO_THREADS__ should not be coupled to the availability of _Thread_local. +# In case we get a bug report, guard it with __STDC_NO_THREADS__ after all. +# +# `thread_local` has become a keyword in C23, but don't try to use that yet +# (too new, doing so while C23 support is preliminary will likely cause more +# problems than it solves). +# +# Note: do not try to use `threads.h`, its availability is very low +# *and* threads.h isn't actually used where `F2PY_THREAD_LOCAL_DECL` is +# in the generated code. See gh-27718 for more details. 
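# For a hypothetical example of the external override described above (not
# taken from this patch): a package reusing f2py-generated C sources could add
#     -DF2PY_THREAD_LOCAL_DECL=__thread
# to its own compile flags after running the same compile-time checks NumPy
# uses for NPY_TLS; the #ifndef guard that opens the macro below ensures any
# such externally supplied definition takes precedence over the defaults.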
cppmacros["F2PY_THREAD_LOCAL_DECL"] = """ #ifndef F2PY_THREAD_LOCAL_DECL #if defined(_MSC_VER) #define F2PY_THREAD_LOCAL_DECL __declspec(thread) #elif defined(NPY_OS_MINGW) #define F2PY_THREAD_LOCAL_DECL __thread -#elif defined(__STDC_VERSION__) \\ - && (__STDC_VERSION__ >= 201112L) \\ - && !defined(__STDC_NO_THREADS__) \\ - && (!defined(__GLIBC__) || __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 12)) \\ - && !defined(NPY_OS_OPENBSD) && !defined(NPY_OS_HAIKU) -/* __STDC_NO_THREADS__ was first defined in a maintenance release of glibc 2.12, - see https://lists.gnu.org/archive/html/commit-hurd/2012-07/msg00180.html, - so `!defined(__STDC_NO_THREADS__)` may give false positive for the existence - of `threads.h` when using an older release of glibc 2.12 - See gh-19437 for details on OpenBSD */ -#include -#define F2PY_THREAD_LOCAL_DECL thread_local +#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) +#define F2PY_THREAD_LOCAL_DECL _Thread_local #elif defined(__GNUC__) \\ && (__GNUC__ > 4 || (__GNUC__ == 4 && (__GNUC_MINOR__ >= 4))) #define F2PY_THREAD_LOCAL_DECL __thread diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py old mode 100755 new mode 100644 index 68ef46c05fc0..94cb64abe035 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 """ crackfortran --- read fortran (77,90) code and extract declaration information. @@ -425,11 +424,14 @@ def readfortrancode(ffile, dowithline=show, istop=1): if l[-1] not in "\n\r\f": break l = l[:-1] + # Do not lower for directives, gh-2547, gh-27697, gh-26681 + is_f2py_directive = False # Unconditionally remove comments (l, rl) = split_by_unquoted(l, '!') l += ' ' if rl[:5].lower() == '!f2py': # f2py directive l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!') + is_f2py_directive = True if l.strip() == '': # Skip empty line if sourcecodeform == 'free': # In free form, a statement continues in the next line @@ -449,13 +451,15 @@ def readfortrancode(ffile, dowithline=show, istop=1): if l[0] in ['*', 'c', '!', 'C', '#']: if l[1:5].lower() == 'f2py': # f2py directive l = ' ' + l[5:] + is_f2py_directive = True else: # Skip comment line cont = False + is_f2py_directive = False continue elif strictf77: if len(l) > 72: l = l[:72] - if not (l[0] in spacedigits): + if l[0] not in spacedigits: raise Exception('readfortrancode: Found non-(space,digit) char ' 'in the first column.\n\tAre you sure that ' 'this code is in fix form?\n\tline=%s' % repr(l)) @@ -476,6 +480,7 @@ def readfortrancode(ffile, dowithline=show, istop=1): else: # clean up line beginning from possible digits. 
l = ' ' + l[5:] + # f2py directives are already stripped by this point if localdolowercase: finalline = ll.lower() else: @@ -505,7 +510,9 @@ def readfortrancode(ffile, dowithline=show, istop=1): origfinalline = '' else: if localdolowercase: - finalline = ll.lower() + # only skip lowering for C style constructs + # gh-2547, gh-27697, gh-26681, gh-28014 + finalline = ll.lower() if not (is_f2py_directive and iscstyledirective(ll)) else ll else: finalline = ll origfinalline = ll @@ -537,6 +544,7 @@ def readfortrancode(ffile, dowithline=show, istop=1): else: dowithline(finalline) l1 = ll + # Last line should never have an f2py directive anyway if localdolowercase: finalline = ll.lower() else: @@ -2079,7 +2087,7 @@ def postcrack(block, args=None, tab=''): block = analyzecommon(block) block['vars'] = analyzevars(block) block['sortvars'] = sortvarnames(block['vars']) - if 'args' in block and block['args']: + if block.get('args'): args = block['args'] block['body'] = analyzebody(block, args, tab=tab) @@ -2095,7 +2103,7 @@ def postcrack(block, args=None, tab=''): if 'name' in block: name = block['name'] # and not userisdefined: # Build a __user__ module - if 'externals' in block and block['externals']: + if block.get('externals'): interfaced = [] if 'interfaced' in block: interfaced = block['interfaced'] @@ -2951,7 +2959,7 @@ def compute_deps(v, deps): else: outmess( 'analyzevars: prefix (%s) were not used\n' % repr(block['prefix'])) - if not block['block'] in ['module', 'pythonmodule', 'python module', 'block data']: + if block['block'] not in ['module', 'pythonmodule', 'python module', 'block data']: if 'commonvars' in block: neededvars = copy.copy(block['args'] + block['commonvars']) else: @@ -3029,8 +3037,8 @@ def param_eval(v, g_params, params, dimspec=None): ubound = param_parse(dimrange[1], params) dimrange = range(int(lbound), int(ubound)+1) else: - raise ValueError(f'param_eval: multidimensional array parameters ' - '{dimspec} not supported') + raise ValueError('param_eval: multidimensional array parameters ' + f'{dimspec} not supported') # Parse parameter value v = (v[2:-2] if v.startswith('(/') else v).split(',') diff --git a/numpy/f2py/diagnose.py b/numpy/f2py/diagnose.py index 86d7004abad4..523c2c679d9e 100644 --- a/numpy/f2py/diagnose.py +++ b/numpy/f2py/diagnose.py @@ -100,7 +100,7 @@ def run(): print('------') except Exception as msg: print( - 'error:', msg, '(ignore it, build_flib is obsolute for numpy.distutils 0.2.2 and up)') + 'error:', msg, '(ignore it, build_flib is obsolete for numpy.distutils 0.2.2 and up)') print('------') try: if has_numpy_distutils == 2: diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py old mode 100755 new mode 100644 index f9fa29806e3e..c0f801e06c7f --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 """ f2py2e - Fortran to Python C/API generator. 2nd Edition. @@ -15,10 +14,7 @@ import os import pprint import re -from pathlib import Path -from itertools import dropwhile import argparse -import copy from . import crackfortran from . 
import rules @@ -547,7 +543,7 @@ def __call__(self, parser, namespace, values, option_string=None): include_paths_set.update(values.split(':')) else: include_paths_set.add(values) - setattr(namespace, 'include_paths', list(include_paths_set)) + namespace.include_paths = list(include_paths_set) def f2py_parser(): parser = argparse.ArgumentParser(add_help=False) diff --git a/numpy/f2py/f90mod_rules.py b/numpy/f2py/f90mod_rules.py index db53beaf616b..b1cd15320657 100644 --- a/numpy/f2py/f90mod_rules.py +++ b/numpy/f2py/f90mod_rules.py @@ -97,9 +97,6 @@ def dadd(line, s=doc): usenames = getuseblocks(pymod) for m in findf90modules(pymod): - contains_functions_or_subroutines = any( - item for item in m["body"] if item["block"] in ["function", "subroutine"] - ) sargs, fargs, efargs, modobjs, notvars, onlyvars = [], [], [], [], [ m['name']], [] sargsp = [] @@ -110,13 +107,19 @@ def dadd(line, s=doc): notvars.append(b['name']) for n in m['vars'].keys(): var = m['vars'][n] - if (n not in notvars) and (not l_or(isintent_hide, isprivate)(var)): + + if (n not in notvars and isvariable(var)) and (not l_or(isintent_hide, isprivate)(var)): onlyvars.append(n) mfargs.append(n) outmess('\t\tConstructing F90 module support for "%s"...\n' % (m['name'])) - if m['name'] in usenames and not contains_functions_or_subroutines: - outmess(f"\t\t\tSkipping {m['name']} since it is in 'use'...\n") + if len(onlyvars) == 0 and len(notvars) == 1 and m['name'] in notvars: + outmess(f"\t\t\tSkipping {m['name']} since there are no public vars/func in this module...\n") + continue + + # gh-25186 + if m['name'] in usenames and containscommon(m): + outmess(f"\t\t\tSkipping {m['name']} since it is in 'use' and contains a common block...\n") continue if onlyvars: outmess('\t\t Variables: %s\n' % (' '.join(onlyvars))) diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py old mode 100755 new mode 100644 index 7566e1ececeb..84137811a446 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 """ Rules for building C/API module with f2py2e. @@ -47,7 +46,8 @@ NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. """ -import os, sys +import os +import sys import time import copy from pathlib import Path @@ -245,6 +245,11 @@ if (! PyErr_Occurred()) on_exit(f2py_report_on_exit,(void*)\"#modulename#\"); #endif + + if (PyType_Ready(&PyFortran_Type) < 0) { + return NULL; + } + return m; } #ifdef __cplusplus @@ -459,7 +464,7 @@ { extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void); PyObject* o = PyDict_GetItemString(d,"#name#"); - tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL); + tmp = F2PyCapsule_FromVoidPtr((void*)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),NULL); PyObject_SetAttrString(o,"_cpointer", tmp); Py_DECREF(tmp); s = PyUnicode_FromString("#name#"); diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c index 3594147281a2..4e2aa370b643 100644 --- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -863,7 +863,7 @@ ndarray_from_pyobj(const int type_num, * dtype('S'). In addition, there is also dtype('c'), that * appears as dtype('S1') (these have the same type_num value), * but is actually different (.char attribute is either 'S' or - * 'c', respecitely). + * 'c', respectively). * * In Fortran, character arrays and strings are different * concepts. 
The relation between Fortran types, NumPy dtypes, diff --git a/numpy/f2py/symbolic.py b/numpy/f2py/symbolic.py index 6884a473b43b..63d277d9b01d 100644 --- a/numpy/f2py/symbolic.py +++ b/numpy/f2py/symbolic.py @@ -1425,7 +1425,7 @@ def restore(r): return result # referencing/dereferencing - if r.startswith('*') or r.startswith('&'): + if r.startswith(('*', '&')): op = {'*': Op.DEREF, '&': Op.REF}[r[0]] operand = self.process(restore(r[1:])) return Expr(op, operand) diff --git a/numpy/f2py/tests/src/callback/gh26681.f90 b/numpy/f2py/tests/src/callback/gh26681.f90 new file mode 100644 index 000000000000..00c0ec93df05 --- /dev/null +++ b/numpy/f2py/tests/src/callback/gh26681.f90 @@ -0,0 +1,18 @@ +module utils + implicit none + contains + subroutine my_abort(message) + implicit none + character(len=*), intent(in) :: message + !f2py callstatement PyErr_SetString(PyExc_ValueError, message);f2py_success = 0; + !f2py callprotoargument char* + write(0,*) "THIS SHOULD NOT APPEAR" + stop 1 + end subroutine my_abort + + subroutine do_something(message) + !f2py intent(callback, hide) mypy_abort + character(len=*), intent(in) :: message + call mypy_abort(message) + end subroutine do_something +end module utils diff --git a/numpy/f2py/tests/src/crackfortran/gh27697.f90 b/numpy/f2py/tests/src/crackfortran/gh27697.f90 new file mode 100644 index 000000000000..a5eae4e79b25 --- /dev/null +++ b/numpy/f2py/tests/src/crackfortran/gh27697.f90 @@ -0,0 +1,12 @@ +module utils + implicit none + contains + subroutine my_abort(message) + implicit none + character(len=*), intent(in) :: message + !f2py callstatement PyErr_SetString(PyExc_ValueError, message);f2py_success = 0; + !f2py callprotoargument char* + write(0,*) "THIS SHOULD NOT APPEAR" + stop 1 + end subroutine my_abort +end module utils diff --git a/numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 b/numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 new file mode 100644 index 000000000000..07adce591f35 --- /dev/null +++ b/numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 @@ -0,0 +1,21 @@ + module mod2 + implicit none + private mod2_func1 + contains + + subroutine mod2_func1() + print*, "mod2_func1" + end subroutine mod2_func1 + + end module mod2 + + module mod1 + implicit none + private :: mod1_func1 + contains + + subroutine mod1_func1() + print*, "mod1_func1" + end subroutine mod1_func1 + + end module mod1 diff --git a/numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 b/numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 new file mode 100644 index 000000000000..b7fb95b010a6 --- /dev/null +++ b/numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 @@ -0,0 +1,21 @@ + module mod2 + implicit none + PUBLIC :: mod2_func1 + contains + + subroutine mod2_func1() + print*, "mod2_func1" + end subroutine mod2_func1 + + end module mod2 + + module mod1 + implicit none + PUBLIC :: mod1_func1 + contains + + subroutine mod1_func1() + print*, "mod1_func1" + end subroutine mod1_func1 + + end module mod1 diff --git a/numpy/f2py/tests/src/regression/assignOnlyModule.f90 b/numpy/f2py/tests/src/regression/assignOnlyModule.f90 new file mode 100644 index 000000000000..479ac7980c22 --- /dev/null +++ b/numpy/f2py/tests/src/regression/assignOnlyModule.f90 @@ -0,0 +1,25 @@ + MODULE MOD_TYPES + INTEGER, PARAMETER :: SP = SELECTED_REAL_KIND(6, 37) + INTEGER, PARAMETER :: DP = SELECTED_REAL_KIND(15, 307) + END MODULE +! 
+ MODULE F_GLOBALS + USE MOD_TYPES + IMPLICIT NONE + INTEGER, PARAMETER :: N_MAX = 16 + INTEGER, PARAMETER :: I_MAX = 18 + INTEGER, PARAMETER :: J_MAX = 72 + REAL(SP) :: XREF + END MODULE F_GLOBALS +! + SUBROUTINE DUMMY () +! + USE F_GLOBALS + USE MOD_TYPES + IMPLICIT NONE +! + REAL(SP) :: MINIMAL + MINIMAL = 0.01*XREF + RETURN +! + END SUBROUTINE DUMMY diff --git a/numpy/f2py/tests/src/regression/datonly.f90 b/numpy/f2py/tests/src/regression/datonly.f90 new file mode 100644 index 000000000000..67fc4aca82e3 --- /dev/null +++ b/numpy/f2py/tests/src/regression/datonly.f90 @@ -0,0 +1,17 @@ +module datonly + implicit none + integer, parameter :: max_value = 100 + real, dimension(:), allocatable :: data_array +end module datonly + +module dat + implicit none + integer, parameter :: max_= 1009 +end module dat + +subroutine simple_subroutine(ain, aout) + use dat, only: max_ + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + max_ +end subroutine simple_subroutine diff --git a/numpy/f2py/tests/src/regression/lower_f2py_fortran.f90 b/numpy/f2py/tests/src/regression/lower_f2py_fortran.f90 new file mode 100644 index 000000000000..1c4b8c192b1b --- /dev/null +++ b/numpy/f2py/tests/src/regression/lower_f2py_fortran.f90 @@ -0,0 +1,5 @@ +subroutine inquire_next(IU) + IMPLICIT NONE + integer :: IU + !f2py intent(in) IU +end subroutine diff --git a/numpy/f2py/tests/src/routines/funcfortranname.f b/numpy/f2py/tests/src/routines/funcfortranname.f new file mode 100644 index 000000000000..89be972d3419 --- /dev/null +++ b/numpy/f2py/tests/src/routines/funcfortranname.f @@ -0,0 +1,5 @@ + REAL*8 FUNCTION FUNCFORTRANNAME(A,B) + REAL*8 A, B + FUNCFORTRANNAME = A + B + RETURN + END FUNCTION diff --git a/numpy/f2py/tests/src/routines/funcfortranname.pyf b/numpy/f2py/tests/src/routines/funcfortranname.pyf new file mode 100644 index 000000000000..8730ca6a67ed --- /dev/null +++ b/numpy/f2py/tests/src/routines/funcfortranname.pyf @@ -0,0 +1,11 @@ +python module funcfortranname ! in + interface ! in :funcfortranname + function funcfortranname_default(a,b) ! in :funcfortranname:funcfortranname.f + fortranname funcfortranname + real*8 :: a + real*8 :: b + real*8 :: funcfortranname_default + real*8, intent(out) :: funcfortranname + end function funcfortranname_default + end interface +end python module funcfortranname diff --git a/numpy/f2py/tests/src/routines/subrout.f b/numpy/f2py/tests/src/routines/subrout.f new file mode 100644 index 000000000000..1d1eeaeb5a45 --- /dev/null +++ b/numpy/f2py/tests/src/routines/subrout.f @@ -0,0 +1,4 @@ + SUBROUTINE SUBROUT(A,B,C) + REAL*8 A, B, C + C = A + B + END SUBROUTINE diff --git a/numpy/f2py/tests/src/routines/subrout.pyf b/numpy/f2py/tests/src/routines/subrout.pyf new file mode 100644 index 000000000000..e27cbe1c7455 --- /dev/null +++ b/numpy/f2py/tests/src/routines/subrout.pyf @@ -0,0 +1,10 @@ +python module subrout ! in + interface ! in :subrout + subroutine subrout_default(a,b,c) ! in :subrout:subrout.f + fortranname subrout + real*8 :: a + real*8 :: b + real*8, intent(out) :: c + end subroutine subrout_default + end interface +end python module subrout diff --git a/numpy/f2py/tests/test_abstract_interface.py b/numpy/f2py/tests/test_abstract_interface.py index 2c6555aecea1..0bc38b51f95d 100644 --- a/numpy/f2py/tests/test_abstract_interface.py +++ b/numpy/f2py/tests/test_abstract_interface.py @@ -1,6 +1,4 @@ -from pathlib import Path import pytest -import textwrap from . 
import util from numpy.f2py import crackfortran from numpy.testing import IS_WASM diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py index c10fe75a04cf..41ed2c7a0dfe 100644 --- a/numpy/f2py/tests/test_array_from_pyobj.py +++ b/numpy/f2py/tests/test_array_from_pyobj.py @@ -1,4 +1,3 @@ -import os import sys import copy import platform @@ -7,7 +6,6 @@ import numpy as np -from numpy.testing import assert_, assert_equal from numpy._core._type_aliases import c_names_dict as _c_names_dict from . import util @@ -145,7 +143,7 @@ def is_intent_exact(self, *names): # 32 bit system malloc typically does not provide the alignment required by # 16 byte long double types this means the inout intent cannot be satisfied -# and several tests fail as the alignment flag can be randomly true or fals +# and several tests fail as the alignment flag can be randomly true or false # when numpy gains an aligned allocator the tests could be enabled again # # Furthermore, on macOS ARM64, LONGDOUBLE is an alias for DOUBLE. @@ -191,12 +189,12 @@ def _init(self, name): if self.NAME == 'CHARACTER': info = c_names_dict[self.NAME] - self.type_num = getattr(wrap, 'NPY_STRING') + self.type_num = wrap.NPY_STRING self.elsize = 1 self.dtype = np.dtype('c') elif self.NAME.startswith('STRING'): info = c_names_dict[self.NAME[:6]] - self.type_num = getattr(wrap, 'NPY_STRING') + self.type_num = wrap.NPY_STRING self.elsize = int(self.NAME[6:] or 0) self.dtype = np.dtype(f'S{self.elsize}') else: diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py index 8bd6175a3eb9..4a9ed484a4a4 100644 --- a/numpy/f2py/tests/test_callback.py +++ b/numpy/f2py/tests/test_callback.py @@ -5,6 +5,7 @@ import threading import traceback import time +import platform import numpy as np from numpy.testing import IS_PYPY @@ -94,7 +95,7 @@ def callback(code): else: return 1 - f = getattr(self.module, "string_callback") + f = self.module.string_callback r = f(callback) assert r == 0 @@ -115,7 +116,7 @@ def callback(cu, lencu): return 3 return 0 - f = getattr(self.module, "string_callback_array") + f = self.module.string_callback_array for cu in [cu1, cu2, cu3]: res = f(callback, cu, cu.size) assert res == 0 @@ -244,3 +245,17 @@ def bar(x): res = self.module.foo(bar) assert res == 110 + + +@pytest.mark.slow +@pytest.mark.xfail(condition=(platform.system().lower() == 'darwin'), + run=False, + reason="Callback aborts cause CI failures on macOS") +class TestCBFortranCallstatement(util.F2PyTest): + sources = [util.getpath("tests", "src", "callback", "gh26681.f90")] + options = ['--lower'] + + def test_callstatement_fortran(self): + with pytest.raises(ValueError, match='helpme') as exc: + self.module.mypy_abort = self.module.utils.my_abort + self.module.utils.do_something('helpme') diff --git a/numpy/f2py/tests/test_character.py b/numpy/f2py/tests/test_character.py index 50e55e1a91cf..da00fa9e27cd 100644 --- a/numpy/f2py/tests/test_character.py +++ b/numpy/f2py/tests/test_character.py @@ -102,7 +102,7 @@ def test_array_input(self, length): {'1': 'A', '3': 'ABC', 'star': 'ABCDE' * 3}[length], ], dtype='S') - expected = np.array([[c for c in s] for s in a], dtype='u1') + expected = np.array([list(s) for s in a], dtype='u1') assert_array_equal(f(a), expected) @pytest.mark.parametrize("length", length_list) @@ -114,7 +114,7 @@ def test_array_output(self, length): [{'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length], {'1': 'A', '3': 'ABC', 'star': 'ABCDE' * 3}[length]], dtype='S') - a = 
np.array([[c for c in s] for s in expected], dtype='u1') + a = np.array([list(s) for s in expected], dtype='u1') assert_array_equal(f(a), expected) @pytest.mark.parametrize("length", length_list) @@ -127,7 +127,7 @@ def test_2d_array_input(self, length): [{'1': 'f', '3': 'fgh', 'star': 'fghij' * 3}[length], {'1': 'F', '3': 'FGH', 'star': 'FGHIJ' * 3}[length]]], dtype='S') - expected = np.array([[[c for c in item] for item in row] for row in a], + expected = np.array([[list(item) for item in row] for row in a], dtype='u1', order='F') assert_array_equal(f(a), expected) diff --git a/numpy/f2py/tests/test_crackfortran.py b/numpy/f2py/tests/test_crackfortran.py index 4986cfbdc4c7..ed3588c25475 100644 --- a/numpy/f2py/tests/test_crackfortran.py +++ b/numpy/f2py/tests/test_crackfortran.py @@ -1,7 +1,5 @@ import importlib -import codecs import time -import unicodedata import pytest import numpy as np from numpy.f2py.crackfortran import markinnerspaces, nameargspattern @@ -405,3 +403,12 @@ def test_param_eval_too_many_dims(self): dimspec = '(0:4, 3:12, 5)' pytest.raises(ValueError, crackfortran.param_eval, v, g_params, params, dimspec=dimspec) + +@pytest.mark.slow +class TestLowerF2PYDirective(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "gh27697.f90")] + options = ['--lower'] + + def test_no_lower_fail(self): + with pytest.raises(ValueError, match='aborting directly') as exc: + self.module.utils.my_abort('aborting directly') diff --git a/numpy/f2py/tests/test_data.py b/numpy/f2py/tests/test_data.py index 5af5c40447d3..e2a425084a55 100644 --- a/numpy/f2py/tests/test_data.py +++ b/numpy/f2py/tests/test_data.py @@ -1,4 +1,3 @@ -import os import pytest import numpy as np diff --git a/numpy/f2py/tests/test_docs.py b/numpy/f2py/tests/test_docs.py index 55540a9c7d19..efba7ea40ee6 100644 --- a/numpy/f2py/tests/test_docs.py +++ b/numpy/f2py/tests/test_docs.py @@ -34,11 +34,11 @@ class TestDocAdvanced(util.F2PyTest): _path('ftype.f')] def test_asterisk1(self): - foo = getattr(self.module, 'foo1') + foo = self.module.foo1 assert_equal(foo(), b'123456789A12') def test_asterisk2(self): - foo = getattr(self.module, 'foo2') + foo = self.module.foo2 assert_equal(foo(2), b'12') assert_equal(foo(12), b'123456789A12') assert_equal(foo(20), b'123456789A123456789B') diff --git a/numpy/f2py/tests/test_f2py2e.py b/numpy/f2py/tests/test_f2py2e.py index ce0046eb1b4b..3f321418f403 100644 --- a/numpy/f2py/tests/test_f2py2e.py +++ b/numpy/f2py/tests/test_f2py2e.py @@ -1,6 +1,11 @@ -import textwrap, re, sys, subprocess, shlex +import re +import shlex +import subprocess +import sys +import textwrap from pathlib import Path from collections import namedtuple + import platform import pytest @@ -244,7 +249,7 @@ def test_no_py312_distutils_fcompiler(capfd, hello_world_f90, monkeypatch): out, _ = capfd.readouterr() assert "--fcompiler cannot be used with meson" in out monkeypatch.setattr( - sys, "argv", f"f2py --help-link".split() + sys, "argv", "f2py --help-link".split() ) with util.switchdir(ipath.parent): f2pycli() diff --git a/numpy/f2py/tests/test_kind.py b/numpy/f2py/tests/test_kind.py index c8cc57ff21c9..a8403ca36606 100644 --- a/numpy/f2py/tests/test_kind.py +++ b/numpy/f2py/tests/test_kind.py @@ -1,5 +1,4 @@ import sys -import os import pytest import platform diff --git a/numpy/f2py/tests/test_mixed.py b/numpy/f2py/tests/test_mixed.py index 49d0ba20c29a..688c1630fda6 100644 --- a/numpy/f2py/tests/test_mixed.py +++ b/numpy/f2py/tests/test_mixed.py @@ -1,4 +1,3 @@ -import os import textwrap 
import pytest diff --git a/numpy/f2py/tests/test_modules.py b/numpy/f2py/tests/test_modules.py index 009ae3365cd5..436e0c700017 100644 --- a/numpy/f2py/tests/test_modules.py +++ b/numpy/f2py/tests/test_modules.py @@ -5,6 +5,37 @@ from numpy.testing import IS_PYPY +@pytest.mark.slow +class TestModuleFilterPublicEntities(util.F2PyTest): + sources = [ + util.getpath( + "tests", "src", "modules", "gh26920", + "two_mods_with_one_public_routine.f90" + ) + ] + # we filter the only public function mod2 + only = ["mod1_func1", ] + + def test_gh26920(self): + # if it compiles and can be loaded, things are fine + pass + + +@pytest.mark.slow +class TestModuleWithoutPublicEntities(util.F2PyTest): + sources = [ + util.getpath( + "tests", "src", "modules", "gh26920", + "two_mods_with_no_public_entities.f90" + ) + ] + only = ["mod1_func1", ] + + def test_gh26920(self): + # if it compiles and can be loaded, things are fine + pass + + @pytest.mark.slow class TestModuleDocString(util.F2PyTest): sources = [util.getpath("tests", "src", "modules", "module_data_docstring.f90")] diff --git a/numpy/f2py/tests/test_parameter.py b/numpy/f2py/tests/test_parameter.py index 9c83af174440..154131f49f7b 100644 --- a/numpy/f2py/tests/test_parameter.py +++ b/numpy/f2py/tests/test_parameter.py @@ -1,4 +1,3 @@ -import os import pytest import numpy as np diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index e11ed1a0efa3..c62f82ac3fc0 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -24,6 +24,18 @@ def test_inout(self): assert np.allclose(x, [3, 1, 2]) +class TestDataOnlyMultiModule(util.F2PyTest): + # Check that modules without subroutines work + sources = [util.getpath("tests", "src", "regression", "datonly.f90")] + + @pytest.mark.slow + def test_mdat(self): + assert self.module.datonly.max_value == 100 + assert self.module.dat.max_ == 1009 + int_in = 5 + assert self.module.simple_subroutine(5) == 1014 + + class TestNegativeBounds(util.F2PyTest): # Check that negative bounds work correctly sources = [util.getpath("tests", "src", "negative_bounds", "issue_20853.f90")] @@ -110,6 +122,15 @@ def test_gh26148b(self): assert(res[0] == 8) assert(res[1] == 15) +class TestLowerF2PYDirectives(util.F2PyTest): + # Check variables are cased correctly + sources = [util.getpath("tests", "src", "regression", "lower_f2py_fortran.f90")] + + @pytest.mark.slow + def test_gh28014(self): + self.module.inquire_next(3) + assert True + @pytest.mark.slow def test_gh26623(): # Including libraries with . should not generate an incorrect meson.build @@ -139,3 +160,15 @@ def test_gh25784(): ) except ImportError as rerr: assert "unknown_subroutine_" in str(rerr) + + +@pytest.mark.slow +class TestAssignmentOnlyModules(util.F2PyTest): + # Ensure that variables are exposed without functions or subroutines in a module + sources = [util.getpath("tests", "src", "regression", "assignOnlyModule.f90")] + + @pytest.mark.slow + def test_gh27167(self): + assert (self.module.f_globals.n_max == 16) + assert (self.module.f_globals.i_max == 18) + assert (self.module.f_globals.j_max == 72) diff --git a/numpy/f2py/tests/test_routines.py b/numpy/f2py/tests/test_routines.py new file mode 100644 index 000000000000..d6ab475d899e --- /dev/null +++ b/numpy/f2py/tests/test_routines.py @@ -0,0 +1,28 @@ +import pytest +from . 
import util + + +@pytest.mark.slow +class TestRenamedFunc(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "routines", "funcfortranname.f"), + util.getpath("tests", "src", "routines", "funcfortranname.pyf"), + ] + module_name = "funcfortranname" + + def test_gh25799(self): + assert dir(self.module) + assert self.module.funcfortranname_default(200, 12) == 212 + + +@pytest.mark.slow +class TestRenamedSubroutine(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "routines", "subrout.f"), + util.getpath("tests", "src", "routines", "subrout.pyf"), + ] + module_name = "subrout" + + def test_renamed_subroutine(self): + assert dir(self.module) + assert self.module.subrout_default(200, 12) == 212 diff --git a/numpy/f2py/tests/test_size.py b/numpy/f2py/tests/test_size.py index bd2c349df585..b354711b457f 100644 --- a/numpy/f2py/tests/test_size.py +++ b/numpy/f2py/tests/test_size.py @@ -1,4 +1,3 @@ -import os import pytest import numpy as np diff --git a/numpy/f2py/tests/test_string.py b/numpy/f2py/tests/test_string.py index 9e937188c930..1888f649f543 100644 --- a/numpy/f2py/tests/test_string.py +++ b/numpy/f2py/tests/test_string.py @@ -1,6 +1,4 @@ -import os import pytest -import textwrap import numpy as np from . import util diff --git a/numpy/f2py/tests/test_value_attrspec.py b/numpy/f2py/tests/test_value_attrspec.py index 3855a6273288..1f3fa676ba8c 100644 --- a/numpy/f2py/tests/test_value_attrspec.py +++ b/numpy/f2py/tests/test_value_attrspec.py @@ -1,4 +1,3 @@ -import os import pytest from . import util diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index 9cad71a9cf5c..e2fcc1ba39d4 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -13,8 +13,6 @@ import tempfile import shutil import atexit -import textwrap -import re import pytest import contextlib import numpy @@ -59,7 +57,6 @@ def check_language(lang, code_snippet=None): return runmeson.returncode == 0 finally: shutil.rmtree(tmpdir) - return False fortran77_code = ''' diff --git a/numpy/f2py/use_rules.py b/numpy/f2py/use_rules.py index 808b3dd97ec2..19c111aae56d 100644 --- a/numpy/f2py/use_rules.py +++ b/numpy/f2py/use_rules.py @@ -55,7 +55,7 @@ def buildusevars(m, r): r['map'][k], k, revmap[r['map'][k]])) else: revmap[r['map'][k]] = k - if 'only' in r and r['only']: + if r.get('only'): for v in r['map'].keys(): if r['map'][v] in m['vars']: diff --git a/numpy/fft/__init__.pyi b/numpy/fft/__init__.pyi index 504baff265a6..feac6a7ff8a1 100644 --- a/numpy/fft/__init__.pyi +++ b/numpy/fft/__init__.pyi @@ -1,28 +1,43 @@ -from numpy._pytesttester import PytestTester - -from numpy.fft._pocketfft import ( - fft as fft, - ifft as ifft, - rfft as rfft, - irfft as irfft, - hfft as hfft, - ihfft as ihfft, - rfftn as rfftn, - irfftn as irfftn, - rfft2 as rfft2, - irfft2 as irfft2, - fft2 as fft2, - ifft2 as ifft2, - fftn as fftn, - ifftn as ifftn, +from ._pocketfft import ( + fft, + ifft, + rfft, + irfft, + hfft, + ihfft, + rfftn, + irfftn, + rfft2, + irfft2, + fft2, + ifft2, + fftn, + ifftn, ) - -from numpy.fft._helper import ( - fftshift as fftshift, - ifftshift as ifftshift, - fftfreq as fftfreq, - rfftfreq as rfftfreq, +from ._helper import ( + fftshift, + ifftshift, + fftfreq, + rfftfreq, ) -__all__: list[str] -test: PytestTester +__all__ = [ + "fft", + "ifft", + "rfft", + "irfft", + "hfft", + "ihfft", + "rfftn", + "irfftn", + "rfft2", + "irfft2", + "fft2", + "ifft2", + "fftn", + "ifftn", + "fftshift", + "ifftshift", + "fftfreq", + "rfftfreq", +] diff --git a/numpy/fft/_helper.pyi 
b/numpy/fft/_helper.pyi index a3c17fc675e7..5cb28db2239e 100644 --- a/numpy/fft/_helper.pyi +++ b/numpy/fft/_helper.pyi @@ -10,9 +10,9 @@ from numpy._typing import ( _ArrayLikeComplex_co, ) -_SCT = TypeVar("_SCT", bound=generic) +__all__ = ["fftshift", "ifftshift", "fftfreq", "rfftfreq"] -__all__: list[str] +_SCT = TypeVar("_SCT", bound=generic) @overload def fftshift(x: _ArrayLike[_SCT], axes: None | _ShapeLike = ...) -> NDArray[_SCT]: ... diff --git a/numpy/fft/_pocketfft.py b/numpy/fft/_pocketfft.py index 2199797ad900..c5b5bfdd8372 100644 --- a/numpy/fft/_pocketfft.py +++ b/numpy/fft/_pocketfft.py @@ -132,8 +132,6 @@ def fft(a, n=None, axis=-1, norm=None, out=None): Axis over which to compute the FFT. If not given, the last axis is used. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. @@ -249,8 +247,6 @@ def ifft(a, n=None, axis=-1, norm=None, out=None): Axis over which to compute the inverse DFT. If not given, the last axis is used. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. @@ -341,8 +337,6 @@ def rfft(a, n=None, axis=-1, norm=None, out=None): Axis over which to compute the FFT. If not given, the last axis is used. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. @@ -449,8 +443,6 @@ def irfft(a, n=None, axis=-1, norm=None, out=None): Axis over which to compute the inverse FFT. If not given, the last axis is used. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. @@ -549,8 +541,6 @@ def hfft(a, n=None, axis=-1, norm=None, out=None): Axis over which to compute the FFT. If not given, the last axis is used. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. @@ -652,8 +642,6 @@ def ihfft(a, n=None, axis=-1, norm=None, out=None): Axis over which to compute the inverse FFT. If not given, the last axis is used. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. @@ -808,8 +796,6 @@ def fftn(a, s=None, axes=None, norm=None, out=None): must be explicitly specified too. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. @@ -951,8 +937,6 @@ def ifftn(a, s=None, axes=None, norm=None, out=None): must be explicitly specified too. norm : {"backward", "ortho", "forward"}, optional - .. 
versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. @@ -1077,8 +1061,6 @@ def fft2(a, s=None, axes=(-2, -1), norm=None, out=None): must not be ``None``. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. @@ -1211,8 +1193,6 @@ def ifft2(a, s=None, axes=(-2, -1), norm=None, out=None): must not be ``None``. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. @@ -1328,8 +1308,6 @@ def rfftn(a, s=None, axes=None, norm=None, out=None): must be explicitly specified too. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. @@ -1401,7 +1379,7 @@ def rfftn(a, s=None, axes=None, norm=None, out=None): a = asarray(a) s, axes = _cook_nd_args(a, s, axes) a = rfft(a, s[-1], axes[-1], norm, out=out) - for ii in range(len(axes)-1): + for ii in range(len(axes)-2, -1, -1): a = fft(a, s[ii], axes[ii], norm, out=out) return a @@ -1442,8 +1420,6 @@ def rfft2(a, s=None, axes=(-2, -1), norm=None, out=None): must not be ``None``. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. @@ -1548,8 +1524,6 @@ def irfftn(a, s=None, axes=None, norm=None, out=None): must be explicitly specified too. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. @@ -1666,8 +1640,6 @@ def irfft2(a, s=None, axes=(-2, -1), norm=None, out=None): must not be ``None``. norm : {"backward", "ortho", "forward"}, optional - .. versionadded:: 1.10.0 - Normalization mode (see `numpy.fft`). Default is "backward". Indicates which direction of the forward/backward pair of transforms is scaled and with what normalization factor. 
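[Editor's note, not part of the patch] A quick sanity sketch of the `rfftn` hunk above: with a repeated axis such as `axes=(0, 0, 1)`, the remaining axes must be transformed from last to first so the result matches nested 1-D transforms. This mirrors the gh-27159 regression test added further below; the shapes and sizes are illustrative only.

import numpy as np

x = np.ones((2, 3))
# rfft over the last listed axis first, then fft over the remaining
# axes in reverse order -- the order the corrected loop now uses.
result = np.fft.rfftn(x, axes=(0, 0, 1), s=(10, 20, 40))
expected = np.fft.fft(np.fft.fft(np.fft.rfft(x, axis=1, n=40),
                                 axis=0, n=20), axis=0, n=10)
assert np.allclose(result, expected, atol=1e-6)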
diff --git a/numpy/fft/_pocketfft.pyi b/numpy/fft/_pocketfft.pyi index 7f088572efe8..78f1ff692df0 100644 --- a/numpy/fft/_pocketfft.pyi +++ b/numpy/fft/_pocketfft.pyi @@ -1,12 +1,27 @@ from collections.abc import Sequence -from typing import Literal as L +from typing import Literal as L, TypeAlias from numpy import complex128, float64 from numpy._typing import ArrayLike, NDArray, _ArrayLikeNumber_co -_NormKind = L[None, "backward", "ortho", "forward"] - -__all__: list[str] +__all__ = [ + "fft", + "ifft", + "rfft", + "irfft", + "hfft", + "ihfft", + "rfftn", + "irfftn", + "rfft2", + "irfft2", + "fft2", + "ifft2", + "fftn", + "ifftn", +] + +_NormKind: TypeAlias = L[None, "backward", "ortho", "forward"] def fft( a: ArrayLike, diff --git a/numpy/fft/tests/test_pocketfft.py b/numpy/fft/tests/test_pocketfft.py index d1e4da2eb831..dff2c86742d5 100644 --- a/numpy/fft/tests/test_pocketfft.py +++ b/numpy/fft/tests/test_pocketfft.py @@ -307,6 +307,14 @@ def test_rfftn(self): np.fft.rfftn(x, norm="ortho"), atol=1e-6) assert_allclose(np.fft.rfftn(x) / (30. * 20. * 10.), np.fft.rfftn(x, norm="forward"), atol=1e-6) + # Regression test for gh-27159 + x = np.ones((2, 3)) + result = np.fft.rfftn(x, axes=(0, 0, 1), s=(10, 20, 40)) + assert result.shape == (10, 21) + expected = np.fft.fft(np.fft.fft(np.fft.rfft(x, axis=1, n=40), + axis=0, n=20), axis=0, n=10) + assert expected.shape == (10, 21) + assert_allclose(result, expected, atol=1e-6) def test_irfftn(self): x = random((30, 20, 10)) @@ -494,7 +502,7 @@ def test_fft_with_order(dtype, order, fft): Y_res = fft(Y, axes=ax) assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol) else: - raise ValueError() + raise ValueError @pytest.mark.parametrize("order", ["F", "C"]) diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py index f048b9e2818f..928121ce8f28 100644 --- a/numpy/lib/__init__.py +++ b/numpy/lib/__init__.py @@ -48,12 +48,14 @@ "stride_tricks", "tracemalloc_domain" ] +add_newdoc.__module__ = "numpy.lib" + from numpy._pytesttester import PytestTester test = PytestTester(__name__) del PytestTester def __getattr__(attr): - # Warn for reprecated attributes + # Warn for deprecated/removed aliases import math import warnings @@ -67,7 +69,7 @@ def __getattr__(attr): raise AttributeError( "numpy.lib.emath was an alias for emath module that was removed " "in NumPy 2.0. Replace usages of numpy.lib.emath with " - "numpy.emath.", + "numpy.emath.", name=None ) elif attr in ( @@ -78,13 +80,13 @@ def __getattr__(attr): raise AttributeError( f"numpy.lib.{attr} is now private. If you are using a public " "function, it should be available in the main numpy namespace, " - "otherwise check the NumPy 2.0 migration guide.", + "otherwise check the NumPy 2.0 migration guide.", name=None ) elif attr == "arrayterator": raise AttributeError( "numpy.lib.arrayterator submodule is now private. 
To access " - "Arrayterator class use numpy.lib.Arrayterator.", + "Arrayterator class use numpy.lib.Arrayterator.", name=None ) else: diff --git a/numpy/lib/__init__.pyi b/numpy/lib/__init__.pyi index b8bf2c5afbda..19d6ea7a4d3f 100644 --- a/numpy/lib/__init__.pyi +++ b/numpy/lib/__init__.pyi @@ -1,41 +1,20 @@ -import math as math - -from numpy._pytesttester import PytestTester - -from numpy import ( - ndenumerate as ndenumerate, - ndindex as ndindex, -) - -from numpy.version import version - -from numpy.lib import ( - format as format, - mixins as mixins, - scimath as scimath, - stride_tricks as stride_tricks, - npyio as npyio, - array_utils as array_utils, -) - -from numpy.lib._version import ( - NumpyVersion as NumpyVersion, -) - -from numpy.lib._arrayterator_impl import ( - Arrayterator as Arrayterator, -) - -from numpy._core.multiarray import ( - add_docstring as add_docstring, - tracemalloc_domain as tracemalloc_domain, -) - -from numpy._core.function_base import ( - add_newdoc as add_newdoc, -) - -__all__: list[str] -test: PytestTester - -__version__ = version +from numpy._core.multiarray import add_docstring, tracemalloc_domain +from numpy._core.function_base import add_newdoc + +from . import array_utils, format, introspect, mixins, npyio, scimath, stride_tricks # noqa: F401 +from ._version import NumpyVersion +from ._arrayterator_impl import Arrayterator + +__all__ = [ + "Arrayterator", + "add_docstring", + "add_newdoc", + "array_utils", + "introspect", + "mixins", + "NumpyVersion", + "npyio", + "scimath", + "stride_tricks", + "tracemalloc_domain", +] diff --git a/numpy/lib/_array_utils_impl.pyi b/numpy/lib/_array_utils_impl.pyi index a38a62f2813c..11a2aafb8837 100644 --- a/numpy/lib/_array_utils_impl.pyi +++ b/numpy/lib/_array_utils_impl.pyi @@ -1,9 +1,9 @@ -from typing import Any, Iterable, Tuple +from typing import Any, Iterable from numpy import generic from numpy.typing import NDArray -__all__: list[str] +__all__ = ["byte_bounds", "normalize_axis_tuple", "normalize_axis_index"] # NOTE: In practice `byte_bounds` can (potentially) take any object # implementing the `__array_interface__` protocol. The caveat is @@ -12,11 +12,11 @@ __all__: list[str] def byte_bounds(a: generic | NDArray[Any]) -> tuple[int, int]: ... def normalize_axis_tuple( - axis: int | Iterable[int], - ndim: int = ..., - argname: None | str = ..., + axis: int | Iterable[int], + ndim: int = ..., + argname: None | str = ..., allow_duplicate: None | bool = ..., -) -> Tuple[int, int]: ... +) -> tuple[int, int]: ... def normalize_axis_index( axis: int = ..., diff --git a/numpy/lib/_arraypad_impl.py b/numpy/lib/_arraypad_impl.py index 8bdb1b992195..2e190871722b 100644 --- a/numpy/lib/_arraypad_impl.py +++ b/numpy/lib/_arraypad_impl.py @@ -220,7 +220,7 @@ def _get_linear_ramps(padded, axis, width_pair, end_value_pair): end_value_pair, edge_pair, width_pair ) ) - + # Reverse linear space in appropriate dimension right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)] @@ -293,7 +293,7 @@ def _get_stats(padded, axis, width_pair, length_pair, stat_func): return left_stat, right_stat -def _set_reflect_both(padded, axis, width_pair, method, +def _set_reflect_both(padded, axis, width_pair, method, original_period, include_edge=False): """ Pad `axis` of `arr` with reflection. 
@@ -323,17 +323,17 @@ def _set_reflect_both(padded, axis, width_pair, method, """ left_pad, right_pad = width_pair old_length = padded.shape[axis] - right_pad - left_pad - + if include_edge: - # Avoid wrapping with only a subset of the original area - # by ensuring period can only be a multiple of the original + # Avoid wrapping with only a subset of the original area + # by ensuring period can only be a multiple of the original # area's length. old_length = old_length // original_period * original_period # Edge is included, we need to offset the pad amount by 1 edge_offset = 1 else: - # Avoid wrapping with only a subset of the original area - # by ensuring period can only be a multiple of the original + # Avoid wrapping with only a subset of the original area + # by ensuring period can only be a multiple of the original # area's length. old_length = ((old_length - 1) // (original_period - 1) * (original_period - 1) + 1) @@ -595,8 +595,6 @@ def pad(array, pad_width, mode='constant', **kwargs): 'empty' Pads with undefined values. - .. versionadded:: 1.17 - Padding function, see Notes. stat_length : sequence or int, optional @@ -655,8 +653,6 @@ def pad(array, pad_width, mode='constant', **kwargs): Notes ----- - .. versionadded:: 1.7.0 - For an array with rank greater than 1, some of the padding of later axes is calculated from padding of previous axes. This is easiest to think about with a rank 2 array where the corners of the padded array diff --git a/numpy/lib/_arraypad_impl.pyi b/numpy/lib/_arraypad_impl.pyi index 1ac6fc7d91c8..3a2c433c338a 100644 --- a/numpy/lib/_arraypad_impl.pyi +++ b/numpy/lib/_arraypad_impl.pyi @@ -1,9 +1,11 @@ from typing import ( Literal as L, Any, + TypeAlias, overload, TypeVar, Protocol, + type_check_only, ) from numpy import generic @@ -15,8 +17,11 @@ from numpy._typing import ( _ArrayLike, ) +__all__ = ["pad"] + _SCT = TypeVar("_SCT", bound=generic) +@type_check_only class _ModeFunc(Protocol): def __call__( self, @@ -27,7 +32,7 @@ class _ModeFunc(Protocol): /, ) -> None: ... -_ModeKind = L[ +_ModeKind: TypeAlias = L[ "constant", "edge", "linear_ramp", @@ -41,7 +46,6 @@ _ModeKind = L[ "empty", ] -__all__: list[str] # TODO: In practice each keyword argument is exclusive to one or more # specific modes. Consider adding more overloads to express this in the future. diff --git a/numpy/lib/_arraysetops_impl.py b/numpy/lib/_arraysetops_impl.py index 3de2128c1d5c..60b3425682fb 100644 --- a/numpy/lib/_arraysetops_impl.py +++ b/numpy/lib/_arraysetops_impl.py @@ -177,8 +177,6 @@ def unique(ar, return_index=False, return_inverse=False, that contain objects are not supported if the `axis` kwarg is used. The default is None. - .. versionadded:: 1.13.0 - equal_nan : bool, optional If True, collapses multiple NaN values in the return array into one. @@ -198,11 +196,10 @@ def unique(ar, return_index=False, return_inverse=False, The number of times each of the unique values comes up in the original array. Only provided if `return_counts` is True. - .. versionadded:: 1.9.0 - See Also -------- repeat : Repeat elements of an array. + sort : Return a sorted copy of an array. Notes ----- @@ -216,17 +213,15 @@ def unique(ar, return_index=False, return_inverse=False, flattened subarrays are sorted in lexicographic order starting with the first element. - .. versionchanged: 1.21 - If nan values are in the input array, a single nan is put - to the end of the sorted unique values. - - Also for complex arrays all NaN values are considered equivalent + .. 
versionchanged:: 1.21 + Like np.sort, NaN will sort to the end of the values. + For complex arrays all NaN values are considered equivalent (no matter whether the NaN is in the real or imaginary part). As the representant for the returned array the smallest one in the lexicographical order is chosen - see np.sort for how the lexicographical order is defined for complex arrays. - .. versionchanged: 2.0 + .. versionchanged:: 2.0 For multi-dimensional inputs, ``unique_inverse`` is reshaped such that the input can be reconstructed using ``np.take(unique, unique_inverse, axis=axis)``. The result is @@ -288,7 +283,7 @@ def unique(ar, return_index=False, return_inverse=False, """ ar = np.asanyarray(ar) if axis is None: - ret = _unique1d(ar, return_index, return_inverse, return_counts, + ret = _unique1d(ar, return_index, return_inverse, return_counts, equal_nan=equal_nan, inverse_shape=ar.shape, axis=None) return _unpack_tuple(ret) @@ -413,14 +408,14 @@ def _unique_all_dispatcher(x, /): @array_function_dispatch(_unique_all_dispatcher) def unique_all(x): """ - Find the unique elements of an array, and counts, inverse and indices. + Find the unique elements of an array, and counts, inverse, and indices. - This function is an Array API compatible alternative to: + This function is an Array API compatible alternative to:: - >>> x = np.array([1, 1, 2]) - >>> np.unique(x, return_index=True, return_inverse=True, - ... return_counts=True, equal_nan=False) - (array([1, 2]), array([0, 2]), array([0, 0, 1]), array([2, 1])) + np.unique(x, return_index=True, return_inverse=True, + return_counts=True, equal_nan=False) + + but returns a namedtuple for easier access to each output. Parameters ---------- @@ -445,12 +440,16 @@ def unique_all(x): Examples -------- >>> import numpy as np - >>> np.unique_all([1, 1, 2]) - UniqueAllResult(values=array([1, 2]), - indices=array([0, 2]), - inverse_indices=array([0, 0, 1]), - counts=array([2, 1])) - + >>> x = [1, 1, 2] + >>> uniq = np.unique_all(x) + >>> uniq.values + array([1, 2]) + >>> uniq.indices + array([0, 2]) + >>> uniq.inverse_indices + array([0, 0, 1]) + >>> uniq.counts + array([2, 1]) """ result = unique( x, @@ -471,11 +470,11 @@ def unique_counts(x): """ Find the unique elements and counts of an input array `x`. - This function is an Array API compatible alternative to: + This function is an Array API compatible alternative to:: + + np.unique(x, return_counts=True, equal_nan=False) - >>> x = np.array([1, 1, 2]) - >>> np.unique(x, return_counts=True, equal_nan=False) - (array([1, 2]), array([2, 1])) + but returns a namedtuple for easier access to each output. Parameters ---------- @@ -497,9 +496,12 @@ def unique_counts(x): Examples -------- >>> import numpy as np - >>> np.unique_counts([1, 1, 2]) - UniqueCountsResult(values=array([1, 2]), counts=array([2, 1])) - + >>> x = [1, 1, 2] + >>> uniq = np.unique_counts(x) + >>> uniq.values + array([1, 2]) + >>> uniq.counts + array([2, 1]) """ result = unique( x, @@ -520,11 +522,11 @@ def unique_inverse(x): """ Find the unique elements of `x` and indices to reconstruct `x`. - This function is Array API compatible alternative to: + This function is an Array API compatible alternative to:: - >>> x = np.array([1, 1, 2]) - >>> np.unique(x, return_inverse=True, equal_nan=False) - (array([1, 2]), array([0, 0, 1])) + np.unique(x, return_inverse=True, equal_nan=False) + + but returns a namedtuple for easier access to each output. 
Parameters ---------- @@ -547,9 +549,12 @@ def unique_inverse(x): Examples -------- >>> import numpy as np - >>> np.unique_inverse([1, 1, 2]) - UniqueInverseResult(values=array([1, 2]), inverse_indices=array([0, 0, 1])) - + >>> x = [1, 1, 2] + >>> uniq = np.unique_inverse(x) + >>> uniq.values + array([1, 2]) + >>> uniq.inverse_indices + array([0, 0, 1]) """ result = unique( x, @@ -570,11 +575,9 @@ def unique_values(x): """ Returns the unique elements of an input array `x`. - This function is Array API compatible alternative to: + This function is an Array API compatible alternative to:: - >>> x = np.array([1, 1, 2]) - >>> np.unique(x, equal_nan=False) - array([1, 2]) + np.unique(x, equal_nan=False) Parameters ---------- @@ -632,8 +635,6 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False): arrays are returned. The first instance of a value is used if there are multiple. Default is False. - .. versionadded:: 1.15.0 - Returns ------- intersect1d : ndarray @@ -804,8 +805,6 @@ def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): 'table' may be faster in most cases. If 'table' is chosen, `assume_unique` will have no effect. - .. versionadded:: 1.8.0 - Returns ------- in1d : (M,) ndarray, bool @@ -833,8 +832,6 @@ def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): be automatically selected based only on memory usage, so one may manually set ``kind='table'`` if memory constraints can be relaxed. - .. versionadded:: 1.4.0 - Examples -------- >>> import numpy as np @@ -908,11 +905,11 @@ def _in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): # However, here we set the requirement that by default # the intermediate array can only be 6x # the combined memory allocation of the original - # arrays. See discussion on + # arrays. See discussion on # https://github.com/numpy/numpy/pull/12065. if ( - range_safe_from_overflow and + range_safe_from_overflow and (below_memory_constraint or kind == 'table') ): @@ -1069,7 +1066,6 @@ def isin(element, test_elements, assume_unique=False, invert=False, *, Notes ----- - `isin` is an element-wise function version of the python keyword `in`. ``isin(a, b)`` is roughly equivalent to ``np.array([item in b for item in a])`` if `a` and `b` are 1-D sequences. @@ -1089,8 +1085,6 @@ def isin(element, test_elements, assume_unique=False, invert=False, *, be automatically selected based only on memory usage, so one may manually set ``kind='table'`` if memory constraints can be relaxed. - .. 
versionadded:: 1.13.0 - Examples -------- >>> import numpy as np diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index 95498248f21a..20f2d576bf00 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -7,37 +7,10 @@ from typing import ( SupportsIndex, TypeVar, ) +from typing_extensions import deprecated import numpy as np -from numpy import ( - generic, - number, - ushort, - ubyte, - uintc, - uint, - ulonglong, - short, - int8, - byte, - intc, - int_, - intp, - longlong, - half, - single, - double, - longdouble, - csingle, - cdouble, - clongdouble, - timedelta64, - datetime64, - object_, - str_, - bytes_, - void, -) +from numpy import generic, number, int8, intp, timedelta64, object_ from numpy._typing import ( ArrayLike, @@ -50,6 +23,21 @@ from numpy._typing import ( _ArrayLikeNumber_co, ) +__all__ = [ + "ediff1d", + "in1d", + "intersect1d", + "isin", + "setdiff1d", + "setxor1d", + "union1d", + "unique", + "unique_all", + "unique_counts", + "unique_inverse", + "unique_values", +] + _SCT = TypeVar("_SCT", bound=generic) _NumberType = TypeVar("_NumberType", bound=number[Any]) @@ -59,33 +47,17 @@ _NumberType = TypeVar("_NumberType", bound=number[Any]) # Only relevant if two or more arguments are parametrized, (e.g. `setdiff1d`) # which could result in, for example, `int64` and `float64`producing a # `number[_64Bit]` array -_SCTNoCast = TypeVar( - "_SCTNoCast", +_EitherSCT = TypeVar( + "_EitherSCT", np.bool, - ushort, - ubyte, - uintc, - uint, - ulonglong, - short, - byte, - intc, - int_, - longlong, - half, - single, - double, - longdouble, - csingle, - cdouble, - clongdouble, - timedelta64, - datetime64, - object_, - str_, - bytes_, - void, -) + np.int8, np.int16, np.int32, np.int64, np.intp, + np.uint8, np.uint16, np.uint32, np.uint64, np.uintp, + np.float16, np.float32, np.float64, np.longdouble, + np.complex64, np.complex128, np.clongdouble, + np.timedelta64, np.datetime64, + np.bytes_, np.str_, np.void, np.object_, + np.integer, np.floating, np.complexfloating, np.character, +) # fmt: skip class UniqueAllResult(NamedTuple, Generic[_SCT]): values: NDArray[_SCT] @@ -101,8 +73,6 @@ class UniqueInverseResult(NamedTuple, Generic[_SCT]): values: NDArray[_SCT] inverse_indices: NDArray[intp] -__all__: list[str] - @overload def ediff1d( ary: _ArrayLikeBool_co, @@ -325,11 +295,11 @@ def unique_values(x: ArrayLike, /) -> NDArray[Any]: ... @overload def intersect1d( - ar1: _ArrayLike[_SCTNoCast], - ar2: _ArrayLike[_SCTNoCast], + ar1: _ArrayLike[_EitherSCT], + ar2: _ArrayLike[_EitherSCT], assume_unique: bool = ..., return_indices: L[False] = ..., -) -> NDArray[_SCTNoCast]: ... +) -> NDArray[_EitherSCT]: ... @overload def intersect1d( ar1: ArrayLike, @@ -339,11 +309,11 @@ def intersect1d( ) -> NDArray[Any]: ... @overload def intersect1d( - ar1: _ArrayLike[_SCTNoCast], - ar2: _ArrayLike[_SCTNoCast], + ar1: _ArrayLike[_EitherSCT], + ar2: _ArrayLike[_EitherSCT], assume_unique: bool = ..., return_indices: L[True] = ..., -) -> tuple[NDArray[_SCTNoCast], NDArray[intp], NDArray[intp]]: ... +) -> tuple[NDArray[_EitherSCT], NDArray[intp], NDArray[intp]]: ... @overload def intersect1d( ar1: ArrayLike, @@ -354,10 +324,10 @@ def intersect1d( @overload def setxor1d( - ar1: _ArrayLike[_SCTNoCast], - ar2: _ArrayLike[_SCTNoCast], + ar1: _ArrayLike[_EitherSCT], + ar2: _ArrayLike[_EitherSCT], assume_unique: bool = ..., -) -> NDArray[_SCTNoCast]: ... +) -> NDArray[_EitherSCT]: ... 
@overload def setxor1d( ar1: ArrayLike, @@ -374,11 +344,21 @@ def isin( kind: None | str = ..., ) -> NDArray[np.bool]: ... +@deprecated("Use 'isin' instead") +def in1d( + element: ArrayLike, + test_elements: ArrayLike, + assume_unique: bool = ..., + invert: bool = ..., + *, + kind: None | str = ..., +) -> NDArray[np.bool]: ... + @overload def union1d( - ar1: _ArrayLike[_SCTNoCast], - ar2: _ArrayLike[_SCTNoCast], -) -> NDArray[_SCTNoCast]: ... + ar1: _ArrayLike[_EitherSCT], + ar2: _ArrayLike[_EitherSCT], +) -> NDArray[_EitherSCT]: ... @overload def union1d( ar1: ArrayLike, @@ -387,10 +367,10 @@ def union1d( @overload def setdiff1d( - ar1: _ArrayLike[_SCTNoCast], - ar2: _ArrayLike[_SCTNoCast], + ar1: _ArrayLike[_EitherSCT], + ar2: _ArrayLike[_EitherSCT], assume_unique: bool = ..., -) -> NDArray[_SCTNoCast]: ... +) -> NDArray[_EitherSCT]: ... @overload def setdiff1d( ar1: ArrayLike, diff --git a/numpy/lib/_arrayterator_impl.py b/numpy/lib/_arrayterator_impl.py index 146161d0236d..efc529de5cff 100644 --- a/numpy/lib/_arrayterator_impl.py +++ b/numpy/lib/_arrayterator_impl.py @@ -83,12 +83,14 @@ class Arrayterator: """ + __module__ = "numpy.lib" + def __init__(self, var, buf_size=None): self.var = var self.buf_size = buf_size self.start = [0 for dim in var.shape] - self.stop = [dim for dim in var.shape] + self.stop = list(var.shape) self.step = [1 for dim in var.shape] def __getattr__(self, attr): @@ -141,7 +143,7 @@ def flat(self): A 1-D flat iterator for Arrayterator objects. This iterator returns elements of the array to be iterated over in - `~lib.Arrayterator` one by one. + `~lib.Arrayterator` one by one. It is similar to `flatiter`. See Also diff --git a/numpy/lib/_arrayterator_impl.pyi b/numpy/lib/_arrayterator_impl.pyi index fb9c42dd2bbe..58875b3c9301 100644 --- a/numpy/lib/_arrayterator_impl.pyi +++ b/numpy/lib/_arrayterator_impl.pyi @@ -1,26 +1,29 @@ from collections.abc import Generator +from types import EllipsisType from typing import ( Any, + TypeAlias, TypeVar, overload, ) from numpy import ndarray, dtype, generic -from numpy._typing import DTypeLike, NDArray +from numpy._typing import DTypeLike, NDArray, _Shape as _AnyShape -# TODO: Set a shape bound once we've got proper shape support -_Shape = TypeVar("_Shape", bound=Any) +__all__ = ["Arrayterator"] + +# TODO: Rename to ``_ShapeType`` +_Shape = TypeVar("_Shape", bound=_AnyShape) _DType = TypeVar("_DType", bound=dtype[Any]) _ScalarType = TypeVar("_ScalarType", bound=generic) -_Index = ( - ellipsis +_Index: TypeAlias = ( + EllipsisType | int | slice - | tuple[ellipsis | int | slice, ...] + | tuple[EllipsisType | int | slice, ...] ) -__all__: list[str] # NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`, # but its ``__getattr__` method does wrap around the former and thus has @@ -41,8 +44,8 @@ class Arrayterator(ndarray[_Shape, _DType]): self, var: ndarray[_Shape, _DType], buf_size: None | int = ... ) -> None: ... @overload - def __array__(self, dtype: None = ..., copy: None | bool = ...) -> ndarray[Any, _DType]: ... + def __array__(self, dtype: None = ..., copy: None | bool = ...) -> ndarray[_AnyShape, _DType]: ... @overload def __array__(self, dtype: DTypeLike, copy: None | bool = ...) -> NDArray[Any]: ... - def __getitem__(self, index: _Index) -> Arrayterator[Any, _DType]: ... - def __iter__(self) -> Generator[ndarray[Any, _DType], None, None]: ... + def __getitem__(self, index: _Index) -> Arrayterator[_AnyShape, _DType]: ... + def __iter__(self) -> Generator[ndarray[_AnyShape, _DType], None, None]: ... 
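[Editor's note, not part of the patch] For context on the `Arrayterator` hunks above, a minimal usage sketch: the object yields sub-arrays of at most `buf_size` elements in C order, so concatenating the flattened chunks reproduces the original array.

import numpy as np
from numpy.lib import Arrayterator

a = np.arange(3 * 4 * 5).reshape(3, 4, 5)
chunks = list(Arrayterator(a, buf_size=8))  # each chunk holds <= 8 elements
flat = np.concatenate([chunk.ravel() for chunk in chunks])
assert np.array_equal(flat, a.ravel())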
diff --git a/numpy/lib/_datasource.pyi b/numpy/lib/_datasource.pyi new file mode 100644 index 000000000000..9f91fdf893a0 --- /dev/null +++ b/numpy/lib/_datasource.pyi @@ -0,0 +1,31 @@ +from pathlib import Path +from typing import IO, Any, TypeAlias + +from _typeshed import OpenBinaryMode, OpenTextMode + +_Mode: TypeAlias = OpenBinaryMode | OpenTextMode + +### + +# exported in numpy.lib.npyio class DataSource: + def __init__(self, /, destpath: Path | str | None = ...) -> None: ... + def __del__(self, /) -> None: ... + def abspath(self, /, path: str) -> str: ... + def exists(self, /, path: str) -> bool: ... + + # Whether the file-object is opened in string or bytes mode (by default) + # depends on the file-extension of `path` + def open(self, /, path: str, mode: _Mode = "r", encoding: str | None = None, newline: str | None = None) -> IO[Any]: ... + +class Repository(DataSource): + def __init__(self, /, baseurl: str, destpath: str | None = ...) -> None: ... + def listdir(self, /) -> list[str]: ... + +def open( + path: str, + mode: _Mode = "r", + destpath: str | None = ..., + encoding: str | None = None, + newline: str | None = None, +) -> IO[Any]: ... diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index d90070e19e8c..3fa9c5f99d95 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -62,7 +62,7 @@ # get_virtual_index : Callable # The function used to compute the virtual_index. # fix_gamma : Callable -# A function used for discret methods to force the index to a specific value. +# A function used for discrete methods to force the index to a specific value. _QuantileMethods = dict( # --- HYNDMAN and FAN METHODS # Discrete methods @@ -169,8 +169,6 @@ def rot90(m, k=1, axes=(0, 1)): The array is rotated in the plane defined by the axes. Axes must be different. - .. versionadded:: 1.12.0 - Returns ------- y : ndarray @@ -254,8 +252,6 @@ def flip(m, axis=None): The shape of the array is preserved, but the elements are reordered. - .. versionadded:: 1.12.0 - Parameters ---------- m : array_like @@ -268,9 +264,6 @@ def flip(m, axis=None): If axis is a tuple of ints, flipping is performed on all of the axes specified in the tuple. - .. versionchanged:: 1.15.0 - None and tuples of axes are supported - Returns ------- out : array_like @@ -391,7 +384,7 @@ def iterable(y): def _weights_are_valid(weights, a, axis): """Validate weights array. - + We assume, weights is not None. """ wgt = np.asanyarray(weights) @@ -434,9 +427,6 @@ def average(a, axis=None, weights=None, returned=False, *, Axis or axes along which to average `a`. The default, `axis=None`, will average over all of the elements of the input array. If axis is negative it counts from the last to the first axis. - - .. versionadded:: 1.7.0 - If axis is a tuple of ints, averaging is performed on all of the axes specified in the tuple instead of a single axis or all the axes as before. @@ -451,7 +441,7 @@ def average(a, axis=None, weights=None, returned=False, *, The calculation is:: avg = sum(a * weights) / sum(weights) - + where the sum is over all included elements. The only constraint on the values of `weights` is that `sum(weights)` must not be 0. @@ -925,8 +915,6 @@ def copy(a, order='K', subok=False): If True, then sub-classes will be passed-through, otherwise the returned array will be forced to be a base-class array (defaults to False). - ..
versionadded:: 1.19.0 - Returns ------- arr : ndarray @@ -1017,17 +1005,12 @@ def gradient(f, *varargs, axis=None, edge_order=1): edge_order : {1, 2}, optional Gradient is calculated using N-th order accurate differences at the boundaries. Default: 1. - - .. versionadded:: 1.9.1 - axis : None or int or tuple of ints, optional Gradient is calculated only along the given axis or axes The default (axis = None) is to calculate the gradient for all the axes of the input array. axis may be negative, in which case it counts from the last to the first axis. - .. versionadded:: 1.11.0 - Returns ------- gradient : ndarray or tuple of ndarray @@ -1409,8 +1392,6 @@ def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): of the input array in along all other axes. Otherwise the dimension and shape must match `a` except along axis. - .. versionadded:: 1.16.0 - Returns ------- diff : ndarray @@ -1439,7 +1420,7 @@ def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): >>> np.diff(u8_arr) array([255], dtype=uint8) >>> u8_arr[1,...] - u8_arr[0,...] - 255 + np.uint8(255) If this is not desirable, then the array should be cast to a larger integer type first: @@ -1553,8 +1534,6 @@ def interp(x, xp, fp, left=None, right=None, period=None): interpolation of angular x-coordinates. Parameters `left` and `right` are ignored if `period` is specified. - .. versionadded:: 1.10.0 - Returns ------- y : float or complex (corresponding to fp) or ndarray @@ -1685,9 +1664,6 @@ def angle(z, deg=False): The counterclockwise angle from the positive real axis on the complex plane in the range ``(-pi, pi]``, with dtype as numpy.float64. - .. versionchanged:: 1.16.0 - This function works on subclasses of ndarray like `ma.array`. - See Also -------- arctan2 @@ -1867,28 +1843,79 @@ def sort_complex(a): return b -def _trim_zeros(filt, trim=None): +def _arg_trim_zeros(filt): + """Return indices of the first and last non-zero element. + + Parameters + ---------- + filt : array_like + Input array. + + Returns + ------- + start, stop : ndarray + Two arrays containing the indices of the first and last non-zero + element in each dimension. + + See also + -------- + trim_zeros + + Examples + -------- + >>> import numpy as np + >>> _arg_trim_zeros(np.array([0, 0, 1, 1, 0])) + (array([2]), array([3])) + """ + nonzero = ( + np.argwhere(filt) + if filt.dtype != np.object_ + # Historically, `trim_zeros` treats `None` in an object array + # as non-zero while argwhere doesn't, account for that + else np.argwhere(filt != 0) + ) + if nonzero.size == 0: + start = stop = np.array([], dtype=np.intp) + else: + start = nonzero.min(axis=0) + stop = nonzero.max(axis=0) + return start, stop + + +def _trim_zeros(filt, trim=None, axis=None): return (filt,) @array_function_dispatch(_trim_zeros) -def trim_zeros(filt, trim='fb'): - """ - Trim the leading and/or trailing zeros from a 1-D array or sequence. +def trim_zeros(filt, trim='fb', axis=None): + """Remove values along a dimension which are zero along all other. Parameters ---------- - filt : 1-D array or sequence + filt : array_like Input array. - trim : str, optional + trim : {"fb", "f", "b"}, optional A string with 'f' representing trim from front and 'b' to trim from - back. Default is 'fb', trim zeros from both front and back of the - array. + back. By default, zeros are trimmed on both sides. + Front and back refer to the edges of a dimension, with "front" referring + to the side with the lowest index 0, and "back" referring to the highest + index (or index -1).
+ axis : int or sequence, optional + If None, `filt` is cropped such that the smallest bounding box is + returned that still contains all values which are not zero. + If an axis is specified, `filt` will be sliced in that dimension only + on the sides specified by `trim`. The remaining area will be the + smallest that still contains all values which are not zero. Returns ------- - trimmed : 1-D array or sequence - The result of trimming the input. The input data type is preserved. + trimmed : ndarray or sequence + The result of trimming the input. The number of dimensions and the + input data type are preserved. + + Notes + ----- + For all-zero arrays, the first axis is trimmed first. Examples -------- @@ -1897,32 +1924,63 @@ def trim_zeros(filt, trim='fb'): >>> np.trim_zeros(a) array([1, 2, 3, 0, 2, 1]) - >>> np.trim_zeros(a, 'b') + >>> np.trim_zeros(a, trim='b') array([0, 0, 0, ..., 0, 2, 1]) + Multiple dimensions are supported. + + >>> b = np.array([[0, 0, 2, 3, 0, 0], ... [0, 1, 0, 3, 0, 0], ... [0, 0, 0, 0, 0, 0]]) + >>> np.trim_zeros(b) + array([[0, 2, 3], [1, 0, 3]]) + + >>> np.trim_zeros(b, axis=-1) + array([[0, 2, 3], [1, 0, 3], [0, 0, 0]]) + The input data type is preserved, list/tuple in means list/tuple out. >>> np.trim_zeros([0, 1, 2, 0]) [1, 2] """ + filt_ = np.asarray(filt) + + trim = trim.lower() + if trim not in {"fb", "bf", "f", "b"}: + raise ValueError(f"unexpected character(s) in `trim`: {trim!r}") + + start, stop = _arg_trim_zeros(filt_) + stop += 1 # Adjust for slicing + + if start.size == 0: + # filt is all-zero -> assign same values to start and stop so that + # resulting slice will be empty + start = stop = np.zeros(filt_.ndim, dtype=np.intp) + else: + if 'f' not in trim: + start = (None,) * filt_.ndim + if 'b' not in trim: + stop = (None,) * filt_.ndim + + if len(start) == 1: + # filt is 1D -> don't use multi-dimensional slicing to preserve + # non-array input types + sl = slice(start[0], stop[0]) + elif axis is None: + # trim all axes + sl = tuple(slice(*x) for x in zip(start, stop)) + else: + # only trim single axis + axis = normalize_axis_index(axis, filt_.ndim) + sl = (slice(None),) * axis + (slice(start[axis], stop[axis]),) + (...,) + + trimmed = filt[sl] + return trimmed - first = 0 - trim = trim.upper() - if 'F' in trim: - for i in filt: - if i != 0.: - break - else: - first = first + 1 - last = len(filt) - if 'B' in trim: - for i in filt[::-1]: - if i != 0.: - break - else: - last = last - 1 - return filt[first:last] def _extract_dispatcher(condition, arr): @@ -2072,7 +2130,7 @@ def disp(mesg, device=None, linefeed=True): "(deprecated in NumPy 2.0)", DeprecationWarning, stacklevel=2 - ) + ) if device is None: device = sys.stdout @@ -2242,17 +2300,13 @@ class vectorize: ``pyfunc.__doc__``. excluded : set, optional Set of strings or integers representing the positional or keyword - arguments for which the function will not be vectorized. These will be + arguments for which the function will not be vectorized. These will be passed directly to `pyfunc` unmodified. - .. versionadded:: 1.7.0 - cache : bool, optional If `True`, then cache the first function call that determines the number of outputs if `otypes` is not provided. - .. versionadded:: 1.7.0 - signature : string, optional Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for vectorized matrix-vector multiplication. If provided, ``pyfunc`` will be called with (and expected to return) arrays with shapes given by the size of corresponding core dimensions.
By default, ``pyfunc`` is assumed to take scalars as input and output. - .. versionadded:: 1.12.0 - Returns ------- out : callable @@ -2335,15 +2387,15 @@ class vectorize: ... while _p: ... res = res*x + _p.pop(0) ... return res - >>> vpolyval = np.vectorize(mypolyval, excluded=['p']) - >>> vpolyval(p=[1, 2, 3], x=[0, 1]) - array([3, 6]) - Positional arguments may also be excluded by specifying their position: + Here, we exclude the zeroth argument from vectorization whether it is + passed by position or keyword. - >>> vpolyval.excluded.add(0) + >>> vpolyval = np.vectorize(mypolyval, excluded={0, 'p'}) >>> vpolyval([1, 2, 3], x=[0, 1]) array([3, 6]) + >>> vpolyval(p=[1, 2, 3], x=[0, 1]) + array([3, 6]) The `signature` argument allows for vectorizing functions that act on non-scalar arrays of fixed length. For example, you can use it for a @@ -2664,20 +2716,14 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, `fweights` and `aweights` are specified, and ``ddof=0`` will return the simple average. See the notes for the details. The default value is ``None``. - - .. versionadded:: 1.5 fweights : array_like, int, optional 1-D array of integer frequency weights; the number of times each observation vector should be repeated. - - .. versionadded:: 1.10 aweights : array_like, optional 1-D array of observation vector weights. These relative weights are typically large for observations considered "important" and smaller for observations considered less "important". If ``ddof=0`` the array of weights can be used to assign probabilities to observation vectors. - - .. versionadded:: 1.10 dtype : data-type, optional Data-type of the result. By default, the return data-type will have at least `numpy.float64` precision. @@ -2772,7 +2818,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, dtype = np.result_type(m, y, np.float64) X = array(m, ndmin=2, dtype=dtype) - if not rowvar and X.shape[0] != 1: + if not rowvar and m.ndim != 1: X = X.T if X.shape[0] == 0: return np.array([]).reshape(0, 0) @@ -3881,13 +3927,9 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): axis : {int, sequence of int, None}, optional Axis or axes along which the medians are computed. The default, axis=None, will compute the median along a flattened version of - the array. - - .. versionadded:: 1.9.0 - - If a sequence of axes, the array is first flattened along the - given axes, then the median is computed along the resulting - flattened axis. + the array. If a sequence of axes, the array is first flattened + along the given axes, then the median is computed along the + resulting flattened axis. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, @@ -3905,8 +3947,6 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `arr`. - .. versionadded:: 1.9.0 - Returns ------- median : ndarray @@ -4051,9 +4091,6 @@ def percentile(a, Axis or axes along which the percentiles are computed. The default is to compute the percentile(s) along a flattened version of the array. - - .. versionchanged:: 1.9.0 - A tuple of axes is supported out : ndarray, optional Alternative output array in which to place the result. 
It must have the same shape and buffer length as the expected output, @@ -4095,8 +4132,6 @@ def percentile(a, the result as dimensions with size one. With this option, the result will broadcast correctly against the original array `a`. - .. versionadded:: 1.9.0 - weights : array_like, optional An array of weights associated with the values in `a`. Each value in `a` contributes to the percentile according to its associated weight. @@ -4259,8 +4294,6 @@ def quantile(a, """ Compute the q-th quantile of the data along the specified axis. - .. versionadded:: 1.15.0 - Parameters ---------- a : array_like of real numbers @@ -4433,7 +4466,7 @@ def quantile(a, For weighted quantiles, the coverage conditions still hold. The empirical cumulative distribution is simply replaced by its weighted - version, i.e. + version, i.e. :math:`P(Y \\leq t) = \\frac{1}{\\sum_i w_i} \\sum_i w_i 1_{x_i \\leq t}`. Only ``method="inverted_cdf"`` supports weights. @@ -4633,7 +4666,7 @@ def _get_gamma_mask(shape, default_value, conditioned_value, where): return out -def _discret_interpolation_to_boundaries(index, gamma_condition_fun): +def _discrete_interpolation_to_boundaries(index, gamma_condition_fun): previous = np.floor(index) next = previous + 1 gamma = index - previous @@ -4651,21 +4684,21 @@ def _closest_observation(n, quantiles): # "choose the nearest even order statistic at g=0" (H&F (1996) pp. 362). # Order is 1-based so for zero-based indexing round to nearest odd index. gamma_fun = lambda gamma, index: (gamma == 0) & (np.floor(index) % 2 == 1) - return _discret_interpolation_to_boundaries((n * quantiles) - 1 - 0.5, - gamma_fun) + return _discrete_interpolation_to_boundaries((n * quantiles) - 1 - 0.5, + gamma_fun) def _inverted_cdf(n, quantiles): gamma_fun = lambda gamma, _: (gamma == 0) - return _discret_interpolation_to_boundaries((n * quantiles) - 1, - gamma_fun) + return _discrete_interpolation_to_boundaries((n * quantiles) - 1, + gamma_fun) def _quantile_ureduce_func( a: np.array, q: np.array, weights: np.array, - axis: int = None, + axis: int | None = None, out=None, overwrite_input: bool = False, method="linear", @@ -4870,6 +4903,13 @@ def _quantile( # returns 2 instead of 1 because 0.4 is not binary representable. if quantiles.dtype.kind == "f": cdf = cdf.astype(quantiles.dtype) + # Weights must be non-negative, so we might have zero weights at the + # beginning leading to some leading zeros in cdf. The call to + # np.searchsorted for quantiles=0 will then pick the first element, + # but should pick the first one larger than zero. We + # therefore simply set 0 values in cdf to -1. + if np.any(cdf[0, ...] == 0): + cdf[cdf == 0] = -1 def find_cdf_1d(arr, cdf): indices = np.searchsorted(cdf, quantiles, side="left") @@ -4880,7 +4920,7 @@ def find_cdf_1d(arr, cdf): return result r_shape = arr.shape[1:] - if quantiles.ndim > 0: + if quantiles.ndim > 0: r_shape = quantiles.shape + r_shape if out is None: result = np.empty_like(arr, shape=r_shape) @@ -5083,9 +5123,6 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): N-D scalar/vector fields over N-D grids, given one-dimensional coordinate arrays x1, x2,..., xn. - .. versionchanged:: 1.9 - 1-D and 0-D cases are allowed. - Parameters ---------- x1, x2,..., xn : array_like @@ -5093,8 +5130,6 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): indexing : {'xy', 'ij'}, optional Cartesian ('xy', default) or matrix ('ij') indexing of output. See Notes for more details. - - .. 
versionadded:: 1.7.0 sparse : bool, optional If True the shape of the returned coordinate array for dimension *i* is reduced from ``(N1, ..., Ni, ... Nn)`` to @@ -5105,7 +5140,6 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): Default is False. - .. versionadded:: 1.7.0 copy : bool, optional If False, a view into the original arrays are returned in order to conserve memory. Default is True. Please note that @@ -5114,8 +5148,6 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): may refer to a single memory location. If you need to write to the arrays, make copies first. - .. versionadded:: 1.7.0 - Returns ------- X1, X2,..., XN : tuple of ndarrays @@ -5248,7 +5280,7 @@ def delete(arr, obj, axis=None): ---------- arr : array_like Input array. - obj : slice, int or array of ints + obj : slice, int, array-like of ints or bools Indicate indices of sub-arrays to remove along the specified axis. .. versionchanged:: 1.19.0 @@ -5430,11 +5462,13 @@ def insert(arr, obj, values, axis=None): ---------- arr : array_like Input array. - obj : int, slice or sequence of ints + obj : slice, int, array-like of ints or bools Object that defines the index or indices before which `values` is inserted. - .. versionadded:: 1.8.0 + .. versionchanged:: 2.1.2 + Boolean indices are now treated as a mask of elements to insert, + rather than being cast to the integers 0 and 1. Support for multiple insertions when `obj` is a single scalar or a sequence with one element (similar to calling insert multiple @@ -5541,18 +5575,10 @@ def insert(arr, obj, values, axis=None): # need to copy obj, because indices will be changed in-place indices = np.array(obj) if indices.dtype == bool: - # See also delete - # 2012-10-11, NumPy 1.8 - warnings.warn( - "in the future insert will treat boolean arrays and " - "array-likes as a boolean index instead of casting it to " - "integer", FutureWarning, stacklevel=2) - indices = indices.astype(intp) - # Code after warning period: - #if obj.ndim != 1: - # raise ValueError('boolean array argument obj to insert ' - # 'must be one dimensional') - #indices = np.flatnonzero(obj) + if obj.ndim != 1: + raise ValueError('boolean array argument obj to insert ' + 'must be one dimensional') + indices = np.flatnonzero(obj) elif indices.ndim > 1: raise ValueError( "index array argument obj to insert must be one dimensional " diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 5dee76e172e5..214ad1f04f4b 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -1,17 +1,23 @@ -from collections.abc import Sequence, Iterator, Callable, Iterable +from collections.abc import Sequence, Callable, Iterable from typing import ( + Concatenate, Literal as L, Any, + ParamSpec, + TypeAlias, TypeVar, overload, Protocol, SupportsIndex, SupportsInt, - TypeGuard + TypeGuard, + type_check_only ) +from typing_extensions import deprecated +import numpy as np from numpy import ( - vectorize as vectorize, + vectorize, generic, integer, floating, @@ -22,49 +28,89 @@ from numpy import ( timedelta64, datetime64, object_, - bool as bool_, + bool_, _OrderKACF, ) - +from numpy._core.multiarray import bincount from numpy._typing import ( NDArray, ArrayLike, DTypeLike, - _ShapeLike, - _ScalarLike_co, - _DTypeLike, _ArrayLike, + _DTypeLike, + _ShapeLike, + _ArrayLikeBool_co, _ArrayLikeInt_co, _ArrayLikeFloat_co, _ArrayLikeComplex_co, + _ArrayLikeNumber_co, _ArrayLikeTD64_co, _ArrayLikeDT64_co, _ArrayLikeObject_co, _FloatLike_co, _ComplexLike_co, + 
_NumberLike_co, + _ScalarLike_co, + _NestedSequence ) -from numpy._core.multiarray import ( - bincount as bincount, -) +__all__ = [ + "select", + "piecewise", + "trim_zeros", + "copy", + "iterable", + "percentile", + "diff", + "gradient", + "angle", + "unwrap", + "sort_complex", + "flip", + "rot90", + "extract", + "place", + "vectorize", + "asarray_chkfinite", + "average", + "bincount", + "digitize", + "cov", + "corrcoef", + "median", + "sinc", + "hamming", + "hanning", + "bartlett", + "blackman", + "kaiser", + "trapezoid", + "trapz", + "i0", + "meshgrid", + "delete", + "insert", + "append", + "interp", + "quantile", +] _T = TypeVar("_T") _T_co = TypeVar("_T_co", covariant=True) +# The `{}ss` suffix refers to the Python 3.12 syntax: `**P` +_Pss = ParamSpec("_Pss") _SCT = TypeVar("_SCT", bound=generic) _ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) -_2Tuple = tuple[_T, _T] +_2Tuple: TypeAlias = tuple[_T, _T] +@type_check_only class _TrimZerosSequence(Protocol[_T_co]): def __len__(self) -> int: ... + @overload + def __getitem__(self, key: int, /) -> object: ... + @overload def __getitem__(self, key: slice, /) -> _T_co: ... - def __iter__(self) -> Iterator[Any]: ... - -class _SupportsWriteFlush(Protocol): - def write(self, s: str, /) -> object: ... - def flush(self) -> object: ... - -__all__: list[str] @overload def rot90( @@ -180,23 +226,29 @@ def asarray_chkfinite( order: _OrderKACF = ..., ) -> NDArray[Any]: ... -# TODO: Use PEP 612 `ParamSpec` once mypy supports `Concatenate` -# xref python/mypy#8645 @overload def piecewise( x: _ArrayLike[_SCT], - condlist: ArrayLike, - funclist: Sequence[Any | Callable[..., Any]], - *args: Any, - **kw: Any, + condlist: _ArrayLike[bool_] | Sequence[_ArrayLikeBool_co], + funclist: Sequence[ + Callable[Concatenate[NDArray[_SCT], _Pss], NDArray[_SCT | Any]] + | _SCT | object + ], + /, + *args: _Pss.args, + **kw: _Pss.kwargs, ) -> NDArray[_SCT]: ... @overload def piecewise( x: ArrayLike, - condlist: ArrayLike, - funclist: Sequence[Any | Callable[..., Any]], - *args: Any, - **kw: Any, + condlist: _ArrayLike[bool_] | Sequence[_ArrayLikeBool_co], + funclist: Sequence[ + Callable[Concatenate[NDArray[Any], _Pss], NDArray[Any]] + | object + ], + /, + *args: _Pss.args, + **kw: _Pss.kwargs, ) -> NDArray[Any]: ... def select( @@ -255,24 +307,87 @@ def diff( append: ArrayLike = ..., ) -> NDArray[Any]: ... -@overload +@overload # float scalar def interp( - x: _ArrayLikeFloat_co, + x: _FloatLike_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> float64: ... +@overload # float array +def interp( + x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co], xp: _ArrayLikeFloat_co, fp: _ArrayLikeFloat_co, - left: None | _FloatLike_co = ..., - right: None | _FloatLike_co = ..., - period: None | _FloatLike_co = ..., + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, ) -> NDArray[float64]: ... -@overload +@overload # float scalar or array def interp( x: _ArrayLikeFloat_co, xp: _ArrayLikeFloat_co, - fp: _ArrayLikeComplex_co, - left: None | _ComplexLike_co = ..., - right: None | _ComplexLike_co = ..., - period: None | _FloatLike_co = ..., + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[float64] | float64: ... 
+@overload # complex scalar +def interp( + x: _FloatLike_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLike[complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> complex128: ... +@overload # complex or float scalar +def interp( + x: _FloatLike_co, + xp: _ArrayLikeFloat_co, + fp: Sequence[complex | complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> complex128 | float64: ... +@overload # complex array +def interp( + x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co], + xp: _ArrayLikeFloat_co, + fp: _ArrayLike[complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, ) -> NDArray[complex128]: ... +@overload # complex or float array +def interp( + x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co], + xp: _ArrayLikeFloat_co, + fp: Sequence[complex | complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[complex128 | float64]: ... +@overload # complex scalar or array +def interp( + x: _ArrayLikeFloat_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLike[complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[complex128] | complex128: ... +@overload # complex or float scalar or array +def interp( + x: _ArrayLikeFloat_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeNumber_co, + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[complex128 | float64] | complex128 | float64: ... @overload def angle(z: _ComplexLike_co, deg: bool = ...) -> floating[Any]: ... @@ -720,6 +835,9 @@ def trapezoid( | NDArray[floating[Any] | complexfloating[Any, Any] | timedelta64 | object_] ): ... +@deprecated("Use 'trapezoid' instead") +def trapz(y: ArrayLike, x: ArrayLike | None = None, dx: float = 1.0, axis: int = -1) -> generic | NDArray[generic]: ... + def meshgrid( *xi: ArrayLike, copy: bool = ..., diff --git a/numpy/lib/_histograms_impl.py b/numpy/lib/_histograms_impl.py index 45b6500e892d..b361bb4f91ac 100644 --- a/numpy/lib/_histograms_impl.py +++ b/numpy/lib/_histograms_impl.py @@ -238,7 +238,6 @@ def _hist_bin_auto(x, range): and is the default in the R language. This method gives good off-the-shelf behaviour. - .. versionchanged:: 1.15.0 If there is limited variance the IQR can be 0, which results in the FD bin width being 0 too. This is not a valid bin width, so ``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal. @@ -450,6 +449,10 @@ def _get_bin_edges(a, bins, range, weights): bin_edges = np.linspace( first_edge, last_edge, n_equal_bins + 1, endpoint=True, dtype=bin_type) + if np.any(bin_edges[:-1] >= bin_edges[1:]): + raise ValueError( + f'Too many bins for data range. Cannot create {n_equal_bins} ' + f'finite-sized bins.') return bin_edges, (first_edge, last_edge, n_equal_bins) else: return bin_edges, None @@ -498,7 +501,7 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None): supported for automated bin size selection. 'auto' - Minimum bin width between the 'sturges' and 'fd' estimators. + Minimum bin width between the 'sturges' and 'fd' estimators. Provides good all-around performance. 
'fd' (Freedman Diaconis Estimator) @@ -698,8 +701,6 @@ def histogram(a, bins=10, range=None, density=None, weights=None): sequence, it defines a monotonically increasing array of bin edges, including the rightmost edge, allowing for non-uniform bin widths. - .. versionadded:: 1.11.0 - If `bins` is a string, it defines the method used to calculate the optimal bin width, as defined by `histogram_bin_edges`. @@ -773,8 +774,6 @@ def histogram(a, bins=10, range=None, density=None, weights=None): >>> np.sum(hist * np.diff(bin_edges)) 1.0 - .. versionadded:: 1.11.0 - Automated Bin Selection Methods example, using 2 peak random data with 2000 points. diff --git a/numpy/lib/_histograms_impl.pyi b/numpy/lib/_histograms_impl.pyi index 138cdb115ef5..e18ab99035b4 100644 --- a/numpy/lib/_histograms_impl.pyi +++ b/numpy/lib/_histograms_impl.pyi @@ -3,6 +3,7 @@ from typing import ( Literal as L, Any, SupportsIndex, + TypeAlias, ) from numpy._typing import ( @@ -10,7 +11,9 @@ from numpy._typing import ( ArrayLike, ) -_BinKind = L[ +__all__ = ["histogram", "histogramdd", "histogram_bin_edges"] + +_BinKind: TypeAlias = L[ "stone", "auto", "doane", @@ -21,8 +24,6 @@ _BinKind = L[ "sturges", ] -__all__: list[str] - def histogram_bin_edges( a: ArrayLike, bins: _BinKind | SupportsIndex | ArrayLike = ..., diff --git a/numpy/lib/_index_tricks_impl.py b/numpy/lib/_index_tricks_impl.py index 3014e46130e8..da8fbedc8072 100644 --- a/numpy/lib/_index_tricks_impl.py +++ b/numpy/lib/_index_tricks_impl.py @@ -447,7 +447,7 @@ def __getitem__(self, key): def __len__(self): return 0 -# separate classes are used here instead of just making r_ = concatentor(0), +# separate classes are used here instead of just making r_ = concatenator(0), # etc. because otherwise we couldn't get the doc string to come out right # in help(r_) @@ -841,8 +841,6 @@ def fill_diagonal(a, val, wrap=False): Notes ----- - .. versionadded:: 1.4.0 - This functionality can be obtained via `diag_indices`, but internally this version uses a much faster implementation that never constructs the indices and uses simple slicing. @@ -970,10 +968,6 @@ def diag_indices(n, ndim=2): -------- diag_indices_from - Notes - ----- - .. versionadded:: 1.4.0 - Examples -------- >>> import numpy as np @@ -1036,10 +1030,6 @@ def diag_indices_from(arr): -------- diag_indices - Notes - ----- - .. 
versionadded:: 1.4.0 - Examples -------- >>> import numpy as np diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index f13ab4d96e48..bd508a8b5905 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -12,8 +12,8 @@ import numpy as np from numpy import ( # Circumvent a naming conflict with `AxisConcatenator.matrix` matrix as _Matrix, - ndenumerate as ndenumerate, - ndindex as ndindex, + ndenumerate, + ndindex, ndarray, dtype, str_, @@ -32,23 +32,38 @@ from numpy._typing import ( # DTypes DTypeLike, _SupportsDType, -) -from numpy._core.multiarray import ( - unravel_index as unravel_index, - ravel_multi_index as ravel_multi_index, + # Shapes + _Shape, ) +from numpy._core.multiarray import unravel_index, ravel_multi_index + +__all__ = [ + "ravel_multi_index", + "unravel_index", + "mgrid", + "ogrid", + "r_", + "c_", + "s_", + "index_exp", + "ix_", + "ndenumerate", + "ndindex", + "fill_diagonal", + "diag_indices", + "diag_indices_from", +] + _T = TypeVar("_T") _DType = TypeVar("_DType", bound=dtype[Any]) _BoolType = TypeVar("_BoolType", Literal[True], Literal[False]) _TupType = TypeVar("_TupType", bound=tuple[Any, ...]) _ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) -__all__: list[str] - @overload -def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DType]]) -> tuple[ndarray[Any, _DType], ...]: ... +def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DType]]) -> tuple[ndarray[_Shape, _DType], ...]: ... @overload def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[str_], ...]: ... @overload diff --git a/numpy/lib/_iotools.pyi b/numpy/lib/_iotools.pyi new file mode 100644 index 000000000000..c1591b1a0251 --- /dev/null +++ b/numpy/lib/_iotools.pyi @@ -0,0 +1,106 @@ +from collections.abc import Callable, Iterable, Sequence +from typing import Any, ClassVar, Final, Literal, TypedDict, overload, type_check_only + +from typing_extensions import TypeVar, Unpack + +import numpy as np +import numpy.typing as npt + +_T = TypeVar("_T") + +@type_check_only +class _ValidationKwargs(TypedDict, total=False): + excludelist: Iterable[str] | None + deletechars: Iterable[str] | None + case_sensitive: Literal["upper", "lower"] | bool | None + replace_space: str + +### + +__docformat__: Final[str] = "restructuredtext en" + +class ConverterError(Exception): ... +class ConverterLockError(ConverterError): ... +class ConversionWarning(UserWarning): ... + +class LineSplitter: + delimiter: str | int | Iterable[int] | None + comments: str + encoding: str | None + + def __init__( + self, + /, + delimiter: str | bytes | int | Iterable[int] | None = None, + comments: str | bytes = "#", + autostrip: bool = True, + encoding: str | None = None, + ) -> None: ... + def __call__(self, /, line: str | bytes) -> list[str]: ... + def autostrip(self, /, method: Callable[[_T], Iterable[str]]) -> Callable[[_T], list[str]]: ... + +class NameValidator: + defaultexcludelist: ClassVar[Sequence[str]] + defaultdeletechars: ClassVar[Sequence[str]] + excludelist: list[str] + deletechars: set[str] + case_converter: Callable[[str], str] + replace_space: str + + def __init__( + self, + /, + excludelist: Iterable[str] | None = None, + deletechars: Iterable[str] | None = None, + case_sensitive: Literal["upper", "lower"] | bool | None = None, + replace_space: str = "_", + ) -> None: ... + def __call__(self, /, names: Iterable[str], defaultfmt: str = "f%i", nbfields: int | None = None) -> tuple[str, ...]: ... 
+ def validate(self, /, names: Iterable[str], defaultfmt: str = "f%i", nbfields: int | None = None) -> tuple[str, ...]: ... + +class StringConverter: + func: Callable[[str], Any] | None + default: Any + missing_values: set[str] + type: np.dtype[np.datetime64] | np.generic + + def __init__( + self, + /, + dtype_or_func: npt.DTypeLike | None = None, + default: None = None, + missing_values: Iterable[str] | None = None, + locked: bool = False, + ) -> None: ... + def update( + self, + /, + func: Callable[[str], Any], + default: object | None = None, + testing_value: str | None = None, + missing_values: str = "", + locked: bool = False, + ) -> None: ... + # + def __call__(self, /, value: str) -> Any: ... + def upgrade(self, /, value: str) -> Any: ... + def iterupgrade(self, /, value: Iterable[str] | str) -> None: ... + + # + @classmethod + def upgrade_mapper(cls, func: Callable[[str], Any], default: object | None = None) -> None: ... + +@overload +def str2bool(value: Literal["false", "False", "FALSE"]) -> Literal[False]: ... +@overload +def str2bool(value: Literal["true", "True", "TRUE"]) -> Literal[True]: ... + +# +def has_nested_fields(ndtype: np.dtype[np.void]) -> bool: ... +def flatten_dtype(ndtype: np.dtype[np.void], flatten_base: bool = False) -> type[np.dtype[Any]]: ... +def easy_dtype( + ndtype: npt.DTypeLike, + names: Iterable[str] | None = None, + defaultfmt: str = "f%i", + **validationargs: Unpack[_ValidationKwargs], +) -> np.dtype[np.void]: ... diff --git a/numpy/lib/_nanfunctions_impl.py b/numpy/lib/_nanfunctions_impl.py index 958ebc3cbe82..9d0173dbe340 100644 --- a/numpy/lib/_nanfunctions_impl.py +++ b/numpy/lib/_nanfunctions_impl.py @@ -271,8 +271,6 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, is ``None``; if provided, it must have the same shape as the expected output, but the type will be cast if necessary. See :ref:`ufuncs-output-type` for more details. - - .. versionadded:: 1.8.0 keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, @@ -282,8 +280,6 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, `keepdims` will be passed through to the `min` method of sub-classes of `ndarray`. If the sub-classes methods does not implement `keepdims` any exceptions will be raised. - - .. versionadded:: 1.8.0 initial : scalar, optional The maximum value of an output element. Must be present to allow computation on empty slice. See `~numpy.ufunc.reduce` for details. @@ -405,19 +401,14 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, is ``None``; if provided, it must have the same shape as the expected output, but the type will be cast if necessary. See :ref:`ufuncs-output-type` for more details. - - .. versionadded:: 1.8.0 keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `a`. - If the value is anything but the default, then `keepdims` will be passed through to the `max` method of sub-classes of `ndarray`. If the sub-classes methods does not implement `keepdims` any exceptions will be raised. - - .. versionadded:: 1.8.0 initial : scalar, optional The minimum value of an output element. Must be present to allow computation on empty slice. See `~numpy.ufunc.reduce` for details. 
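Circling back to the new `numpy/lib/_iotools.pyi` stub above, a small sketch of the private helpers it annotates (internal API, shown for illustration only; the expected outputs follow the current implementation):

    from numpy.lib._iotools import NameValidator, str2bool

    # Spaces are replaced (replace_space='_') and names on the default
    # exclude list ('return', 'file', 'print') get a trailing underscore.
    validator = NameValidator()
    print(validator(["a b", "file"]))  # ('a_b', 'file_')

    # str2bool is annotated with Literal overloads for both casings.
    print(str2bool("TRUE"))            # True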
@@ -666,28 +657,21 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, the platform (u)intp. In that case, the default will be either (u)int32 or (u)int64 depending on whether the platform is 32 or 64 bits. For inexact inputs, dtype must be inexact. - - .. versionadded:: 1.8.0 out : ndarray, optional Alternate output array in which to place the result. The default is ``None``. If provided, it must have the same shape as the expected output, but the type will be cast if necessary. See :ref:`ufuncs-output-type` for more details. The casting of NaN to integer can yield unexpected results. - - .. versionadded:: 1.8.0 keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original `a`. - If the value is anything but the default, then `keepdims` will be passed through to the `mean` or `sum` methods of sub-classes of `ndarray`. If the sub-classes methods does not implement `keepdims` any exceptions will be raised. - - .. versionadded:: 1.8.0 initial : scalar, optional Starting value for the sum. See `~numpy.ufunc.reduce` for details. @@ -759,8 +743,6 @@ def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, One is returned for slices that are all-NaN or empty. - .. versionadded:: 1.10.0 - Parameters ---------- a : array_like @@ -842,8 +824,6 @@ def nancumsum(a, axis=None, dtype=None, out=None): Zeros are returned for slices that are all-NaN or empty. - .. versionadded:: 1.12.0 - Parameters ---------- a : array_like @@ -913,8 +893,6 @@ def nancumprod(a, axis=None, dtype=None, out=None): Ones are returned for slices that are all-NaN or empty. - .. versionadded:: 1.12.0 - Parameters ---------- a : array_like @@ -985,8 +963,6 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, For all-NaN slices, NaN is returned and a `RuntimeWarning` is raised. - .. versionadded:: 1.8.0 - Parameters ---------- a : array_like @@ -1155,8 +1131,6 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValu Returns the median of the array elements. - .. versionadded:: 1.9.0 - Parameters ---------- a : array_like @@ -1270,8 +1244,6 @@ def nanpercentile( Returns the qth percentile(s) of the array elements. - .. versionadded:: 1.9.0 - Parameters ---------- a : array_like @@ -1462,8 +1434,6 @@ def nanquantile( while ignoring nan values. Returns the qth quantile(s) of the array elements. - .. versionadded:: 1.15.0 - Parameters ---------- a : array_like @@ -1662,7 +1632,7 @@ def _nanquantile_ureduce_func( a: np.array, q: np.array, weights: np.array, - axis: int = None, + axis: int | None = None, out=None, overwrite_input: bool = False, method="linear", @@ -1755,8 +1725,6 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, For all-NaN slices or slices with zero degrees of freedom, NaN is returned and a `RuntimeWarning` is raised. - .. versionadded:: 1.8.0 - Parameters ---------- a : array_like @@ -1793,7 +1761,7 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, The axis for the calculation of the mean should be the same as used in the call to this var function. - .. versionadded:: 1.26.0 + .. versionadded:: 2.0.0 correction : {int, float}, optional Array API compatible name for the ``ddof`` parameter. 
Only one of them @@ -1949,8 +1917,6 @@ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, For all-NaN slices or slices with zero degrees of freedom, NaN is returned and a `RuntimeWarning` is raised. - .. versionadded:: 1.8.0 - Parameters ---------- a : array_like @@ -1992,7 +1958,7 @@ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, The axis for the calculation of the mean should be the same as used in the call to this std function. - .. versionadded:: 1.26.0 + .. versionadded:: 2.0.0 correction : {int, float}, optional Array API compatible name for the ``ddof`` parameter. Only one of them diff --git a/numpy/lib/_nanfunctions_impl.pyi b/numpy/lib/_nanfunctions_impl.pyi index d81f883f76c3..081b53d8ea44 100644 --- a/numpy/lib/_nanfunctions_impl.pyi +++ b/numpy/lib/_nanfunctions_impl.pyi @@ -18,9 +18,24 @@ from numpy.lib._function_base_impl import ( quantile, ) -__all__: list[str] +__all__ = [ + "nansum", + "nanmax", + "nanmin", + "nanargmax", + "nanargmin", + "nanmean", + "nanmedian", + "nanpercentile", + "nanvar", + "nanstd", + "nanprod", + "nancumsum", + "nancumprod", + "nanquantile", +] -# NOTE: In reaility these functions are not aliases but distinct functions +# NOTE: In reality these functions are not aliases but distinct functions # with identical signatures. nanmin = amin nanmax = amax diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index a83c46b0e654..f0d1bb2b0c68 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -9,7 +9,7 @@ import weakref import contextlib import operator -from operator import itemgetter, index as opindex, methodcaller +from operator import itemgetter from collections.abc import Mapping import pickle @@ -19,7 +19,7 @@ from numpy._core import overrides from numpy._core.multiarray import packbits, unpackbits from numpy._core._multiarray_umath import _load_from_filelike -from numpy._core.overrides import set_array_function_like_doc, set_module +from numpy._core.overrides import finalize_array_function_like, set_module from ._iotools import ( LineSplitter, NameValidator, StringConverter, ConverterError, ConverterLockError, ConversionWarning, _is_string_like, @@ -132,10 +132,6 @@ class NpzFile(Mapping): to getitem access on the `NpzFile` instance itself. allow_pickle : bool, optional Allow loading pickled data. Default: False - - .. versionchanged:: 1.16.3 - Made default False in response to CVE-2019-6446. - pickle_kwargs : dict, optional Additional keyword arguments to pass on to pickle.load. These are only useful when loading object arrays saved on @@ -340,10 +336,6 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, disallowing pickles include security, as loading pickled data can execute arbitrary code. If pickles are disallowed, loading object arrays will fail. Default: False - - .. versionchanged:: 1.16.3 - Made default False in response to CVE-2019-6446. - fix_imports : bool, optional Only useful when loading Python 2 generated pickled files on Python 3, which includes npy/npz files containing object arrays. 
If `fix_imports` @@ -469,7 +461,7 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, # If the file size is less than N, we need to make sure not # to seek past the beginning of the file fid.seek(-min(N, len(magic)), 1) # back-up - if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX): + if magic.startswith((_ZIP_PREFIX, _ZIP_SUFFIX)): # zip-file (assume .npz) # Potentially transfer file ownership to NpzFile stack.pop_all() @@ -491,8 +483,10 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, else: # Try a pickle if not allow_pickle: - raise ValueError("Cannot load file containing pickled data " - "when allow_pickle=False") + raise ValueError( + "This file contains pickled (object) data. If you trust " + "the file you can load it unsafely using the " + "`allow_pickle=` keyword argument or `pickle.load()`.") try: return pickle.load(fid, **pickle_kwargs) except Exception as e: @@ -588,13 +582,13 @@ def save(file, arr, allow_pickle=True, fix_imports=np._NoValue): pickle_kwargs=dict(fix_imports=fix_imports)) -def _savez_dispatcher(file, *args, **kwds): +def _savez_dispatcher(file, *args, allow_pickle=True, **kwds): yield from args yield from kwds.values() @array_function_dispatch(_savez_dispatcher) -def savez(file, *args, **kwds): +def savez(file, *args, allow_pickle=True, **kwds): """Save several arrays into a single file in uncompressed ``.npz`` format. Provide arrays as keyword arguments to store them under the @@ -614,6 +608,14 @@ def savez(file, *args, **kwds): Arrays to save to the file. Please use keyword arguments (see `kwds` below) to assign names to arrays. Arrays specified as args will be named "arr_0", "arr_1", and so on. + allow_pickle : bool, optional + Allow saving object arrays using Python pickles. Reasons for + disallowing pickles include security (loading pickled data can execute + arbitrary code) and portability (pickled objects may not be loadable + on different Python installations, for example if the stored objects + require libraries that are not available, and not all pickled data is + compatible between different versions of Python). + Default: True kwds : Keyword arguments, optional Arrays to save to the file. Each array will be saved to the output file with its corresponding keyword name. @@ -678,16 +680,16 @@ def savez(file, *args, **kwds): array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) """ - _savez(file, args, kwds, False) + _savez(file, args, kwds, False, allow_pickle=allow_pickle) -def _savez_compressed_dispatcher(file, *args, **kwds): +def _savez_compressed_dispatcher(file, *args, allow_pickle=True, **kwds): yield from args yield from kwds.values() @array_function_dispatch(_savez_compressed_dispatcher) -def savez_compressed(file, *args, **kwds): +def savez_compressed(file, *args, allow_pickle=True, **kwds): """ Save several arrays into a single file in compressed ``.npz`` format. @@ -708,6 +710,14 @@ def savez_compressed(file, *args, **kwds): Arrays to save to the file. Please use keyword arguments (see `kwds` below) to assign names to arrays. Arrays specified as args will be named "arr_0", "arr_1", and so on. + allow_pickle : bool, optional + Allow saving object arrays using Python pickles. 
Reasons for + disallowing pickles include security (loading pickled data can execute + arbitrary code) and portability (pickled objects may not be loadable + on different Python installations, for example if the stored objects + require libraries that are not available, and not all pickled data is + compatible between different versions of Python). + Default: True kwds : Keyword arguments, optional Arrays to save to the file. Each array will be saved to the output file with its corresponding keyword name. @@ -750,7 +760,7 @@ def savez_compressed(file, *args, **kwds): True """ - _savez(file, args, kwds, True) + _savez(file, args, kwds, True, allow_pickle=allow_pickle) def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None): @@ -777,17 +787,17 @@ def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None): compression = zipfile.ZIP_STORED zipf = zipfile_factory(file, mode="w", compression=compression) - - for key, val in namedict.items(): - fname = key + '.npy' - val = np.asanyarray(val) - # always force zip64, gh-10776 - with zipf.open(fname, 'w', force_zip64=True) as fid: - format.write_array(fid, val, - allow_pickle=allow_pickle, - pickle_kwargs=pickle_kwargs) - - zipf.close() + try: + for key, val in namedict.items(): + fname = key + '.npy' + val = np.asanyarray(val) + # always force zip64, gh-10776 + with zipf.open(fname, 'w', force_zip64=True) as fid: + format.write_array(fid, val, + allow_pickle=allow_pickle, + pickle_kwargs=pickle_kwargs) + finally: + zipf.close() def _ensure_ndmin_ndarray_check_param(ndmin): @@ -1116,7 +1126,7 @@ def _read(fname, *, delimiter=',', comment='#', quote='"', return arr -@set_array_function_like_doc +@finalize_array_function_like @set_module('numpy') def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None, skiprows=0, usecols=None, unpack=False, @@ -1167,11 +1177,6 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, Which columns to read, with 0 being the first. For example, ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. The default, None, results in all columns being read. - - .. versionchanged:: 1.11.0 - When a single column has to be read it is possible to use - an integer instead of a tuple. E.g ``usecols = 3`` reads the - fourth column the same way as ``usecols = (3,)`` would. unpack : bool, optional If True, the returned array is transposed, so that arguments may be unpacked using ``x, y, z = loadtxt(...)``. When used with a @@ -1181,8 +1186,6 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, The returned array will have at least `ndmin` dimensions. Otherwise mono-dimensional axes will be squeezed. Legal values: 0 (default), 1 or 2. - - .. versionadded:: 1.6.0 encoding : str, optional Encoding used to decode the inputfile. Does not apply to input streams. The special value 'bytes' enables backward compatibility workarounds @@ -1191,7 +1194,6 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, unicode arrays and pass strings as input to converters. If set to None the system default is used. The default value is 'bytes'. - .. versionadded:: 1.14.0 .. versionchanged:: 2.0 Before NumPy 2, the default was ``'bytes'`` for Python 2 compatibility. The default is now ``None``. @@ -1202,8 +1204,6 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, empty lines and comment lines are not counted towards `max_rows`, while such lines are counted in `skiprows`. - .. versionadded:: 1.16.0 - .. 
versionchanged:: 1.23.0 Lines containing no data, including comment lines (e.g., lines starting with '#' or as specified via `comments`) are not counted @@ -1244,8 +1244,6 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, subset of up to n columns (where n is the least number of values present in all rows) can be read by specifying the columns via `usecols`. - .. versionadded:: 1.10.0 - The strings produced by the Python float.hex method can be used as input for floats. @@ -1442,31 +1440,20 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='', String or character separating columns. newline : str, optional String or character separating lines. - - .. versionadded:: 1.5.0 header : str, optional String that will be written at the beginning of the file. - - .. versionadded:: 1.7.0 footer : str, optional String that will be written at the end of the file. - - .. versionadded:: 1.7.0 comments : str, optional String that will be prepended to the ``header`` and ``footer`` strings, to mark them as comments. Default: '# ', as expected by e.g. ``numpy.loadtxt``. - - .. versionadded:: 1.7.0 encoding : {None, str}, optional Encoding used to encode the outputfile. Does not apply to output streams. If the encoding is something other than 'bytes' or 'latin1' you will not be able to load the file in NumPy versions < 1.14. Default is 'latin1'. - .. versionadded:: 1.14.0 - - See Also -------- save : Save an array to a binary file in NumPy ``.npy`` format @@ -1671,6 +1658,7 @@ def fromregex(file, regexp, dtype, encoding=None): .. versionchanged:: 1.22.0 Now accepts `os.PathLike` implementations. + regexp : str or regexp Regular expression used to parse the file. Groups in the regular expression correspond to fields in the dtype. @@ -1679,8 +1667,6 @@ def fromregex(file, regexp, dtype, encoding=None): encoding : str, optional Encoding used to decode the inputfile. Does not apply to input streams. - .. versionadded:: 1.14.0 - Returns ------- output : ndarray @@ -1758,7 +1744,7 @@ def fromregex(file, regexp, dtype, encoding=None): #####-------------------------------------------------------------------------- -@set_array_function_like_doc +@finalize_array_function_like @set_module('numpy') def genfromtxt(fname, dtype=float, comments='#', delimiter=None, skip_header=0, skip_footer=0, converters=None, @@ -1857,8 +1843,6 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, The maximum number of rows to read. Must not be used with skip_footer at the same time. If given, the value must be at least 1. Default is to read the entire file. - - .. versionadded:: 1.10.0 encoding : str, optional Encoding used to decode the inputfile. Does not apply when `fname` is a file object. The special value 'bytes' enables backward @@ -1868,7 +1852,6 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, as input to converters. If set to None the system default is used. The default value is 'bytes'. - .. versionadded:: 1.14.0 .. versionchanged:: 2.0 Before NumPy 2, the default was ``'bytes'`` for Python 2 compatibility. The default is now ``None``. 
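The `allow_pickle` keyword added to `savez`/`savez_compressed` above mirrors `np.save`; a minimal sketch of the behaviour it enables (object arrays can now be refused explicitly, with the ValueError text coming from `numpy.lib.format.write_array`):

    import numpy as np

    obj = np.array([{"a": 1}, None], dtype=object)

    # Object arrays require pickling to serialize.
    np.savez("ok.npz", data=obj, allow_pickle=True)   # written via pickle
    try:
        np.savez("refused.npz", data=obj, allow_pickle=False)
    except ValueError:
        pass  # "Object arrays cannot be saved when allow_pickle=False"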
@@ -2109,7 +2092,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, user_missing_values = user_missing_values.decode('latin1') # Define the list of missing_values (one column: one list) - missing_values = [list(['']) for _ in range(nbcols)] + missing_values = [[''] for _ in range(nbcols)] # We have a dictionary: process it field by field if isinstance(user_missing_values, dict): diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index f1dcbfd52d01..19257a802d44 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -1,348 +1,285 @@ -import os -import sys -import zipfile import types +import zipfile +from collections.abc import Callable, Collection, Iterable, Iterator, Mapping, Sequence from re import Pattern -from collections.abc import Collection, Mapping, Iterator, Sequence, Callable, Iterable -from typing import ( - Literal as L, - Any, - TypeVar, - Generic, - IO, - overload, - Protocol, -) +from typing import IO, Any, ClassVar, Generic, Protocol, TypeAlias, overload, type_check_only +from typing import Literal as L -from numpy import ( - ndarray, - recarray, - dtype, - generic, - float64, - void, - record, -) +from _typeshed import StrOrBytesPath, StrPath, SupportsKeysAndGetItem, SupportsRead, SupportsWrite +from typing_extensions import Self, TypeVar, deprecated, override +import numpy as np +from numpy._core.multiarray import packbits, unpackbits +from numpy._typing import ArrayLike, DTypeLike, NDArray, _DTypeLike, _SupportsArrayFunc from numpy.ma.mrecords import MaskedRecords -from numpy._typing import ( - ArrayLike, - DTypeLike, - NDArray, - _DTypeLike, - _SupportsArrayFunc, -) -from numpy._core.multiarray import ( - packbits as packbits, - unpackbits as unpackbits, -) +from ._datasource import DataSource as DataSource -_T = TypeVar("_T") -_T_contra = TypeVar("_T_contra", contravariant=True) -_T_co = TypeVar("_T_co", covariant=True) -_SCT = TypeVar("_SCT", bound=generic) -_CharType_co = TypeVar("_CharType_co", str, bytes, covariant=True) -_CharType_contra = TypeVar("_CharType_contra", str, bytes, contravariant=True) +__all__ = [ + "fromregex", + "genfromtxt", + "load", + "loadtxt", + "packbits", + "save", + "savetxt", + "savez", + "savez_compressed", + "unpackbits", +] -class _SupportsGetItem(Protocol[_T_contra, _T_co]): - def __getitem__(self, key: _T_contra, /) -> _T_co: ... +_T_co = TypeVar("_T_co", covariant=True) +_SCT = TypeVar("_SCT", bound=np.generic) +_SCT_co = TypeVar("_SCT_co", bound=np.generic, default=Any, covariant=True) -class _SupportsRead(Protocol[_CharType_co]): - def read(self) -> _CharType_co: ... +_FName: TypeAlias = StrPath | Iterable[str] | Iterable[bytes] +_FNameRead: TypeAlias = StrPath | SupportsRead[str] | SupportsRead[bytes] +_FNameWriteBytes: TypeAlias = StrPath | SupportsWrite[bytes] +_FNameWrite: TypeAlias = _FNameWriteBytes | SupportsWrite[bytes] -class _SupportsReadSeek(Protocol[_CharType_co]): - def read(self, n: int, /) -> _CharType_co: ... +@type_check_only +class _SupportsReadSeek(SupportsRead[_T_co], Protocol[_T_co]): def seek(self, offset: int, whence: int, /) -> object: ... -class _SupportsWrite(Protocol[_CharType_contra]): - def write(self, s: _CharType_contra, /) -> object: ... - -__all__: list[str] - class BagObj(Generic[_T_co]): - def __init__(self, obj: _SupportsGetItem[str, _T_co]) -> None: ... - def __getattribute__(self, key: str) -> _T_co: ... + def __init__(self, /, obj: SupportsKeysAndGetItem[str, _T_co]) -> None: ... + def __getattribute__(self, key: str, /) -> _T_co: ... 
def __dir__(self) -> list[str]: ... -class NpzFile(Mapping[str, NDArray[Any]]): +class NpzFile(Mapping[str, NDArray[_SCT_co]]): + _MAX_REPR_ARRAY_COUNT: ClassVar[int] = 5 + zip: zipfile.ZipFile - fid: None | IO[str] + fid: IO[str] | None files: list[str] allow_pickle: bool - pickle_kwargs: None | Mapping[str, Any] - _MAX_REPR_ARRAY_COUNT: int - # Represent `f` as a mutable property so we can access the type of `self` - @property - def f(self: _T) -> BagObj[_T]: ... - @f.setter - def f(self: _T, value: BagObj[_T]) -> None: ... + pickle_kwargs: Mapping[str, Any] | None + f: BagObj[NpzFile[_SCT_co]] + + # def __init__( self, - fid: IO[str], - own_fid: bool = ..., - allow_pickle: bool = ..., - pickle_kwargs: None | Mapping[str, Any] = ..., - ) -> None: ... - def __enter__(self: _T) -> _T: ... - def __exit__( - self, - exc_type: None | type[BaseException], - exc_value: None | BaseException, - traceback: None | types.TracebackType, /, + fid: IO[Any], + own_fid: bool = False, + allow_pickle: bool = False, + pickle_kwargs: Mapping[str, object] | None = None, + *, + max_header_size: int = 10_000, ) -> None: ... - def close(self) -> None: ... def __del__(self) -> None: ... - def __iter__(self) -> Iterator[str]: ... + def __enter__(self) -> Self: ... + def __exit__(self, cls: type[BaseException] | None, e: BaseException | None, tb: types.TracebackType | None, /) -> None: ... + @override def __len__(self) -> int: ... - def __getitem__(self, key: str) -> NDArray[Any]: ... - def __contains__(self, key: str) -> bool: ... - def __repr__(self) -> str: ... - -class DataSource: - def __init__( - self, - destpath: None | str | os.PathLike[str] = ..., - ) -> None: ... - def __del__(self) -> None: ... - def abspath(self, path: str) -> str: ... - def exists(self, path: str) -> bool: ... - - # Whether the file-object is opened in string or bytes mode (by default) - # depends on the file-extension of `path` - def open( - self, - path: str, - mode: str = ..., - encoding: None | str = ..., - newline: None | str = ..., - ) -> IO[Any]: ... + @override + def __iter__(self) -> Iterator[str]: ... + @override + def __getitem__(self, key: str, /) -> NDArray[_SCT_co]: ... + def close(self) -> None: ... # NOTE: Returns a `NpzFile` if file is a zip file; # returns an `ndarray`/`memmap` otherwise def load( - file: str | bytes | os.PathLike[Any] | _SupportsReadSeek[bytes], - mmap_mode: L[None, "r+", "r", "w+", "c"] = ..., - allow_pickle: bool = ..., - fix_imports: bool = ..., - encoding: L["ASCII", "latin1", "bytes"] = ..., + file: StrOrBytesPath | _SupportsReadSeek[bytes], + mmap_mode: L["r+", "r", "w+", "c"] | None = None, + allow_pickle: bool = False, + fix_imports: bool = True, + encoding: L["ASCII", "latin1", "bytes"] = "ASCII", + *, + max_header_size: int = 10_000, ) -> Any: ... -def save( - file: str | os.PathLike[str] | _SupportsWrite[bytes], - arr: ArrayLike, - allow_pickle: bool = ..., - fix_imports: bool = ..., -) -> None: ... +@overload +def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool = True) -> None: ... +@overload +@deprecated("The 'fix_imports' flag is deprecated in NumPy 2.1.") +def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool, fix_imports: bool) -> None: ... +@overload +@deprecated("The 'fix_imports' flag is deprecated in NumPy 2.1.") +def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool = True, *, fix_imports: bool) -> None: ... -def savez( - file: str | os.PathLike[str] | _SupportsWrite[bytes], - *args: ArrayLike, - **kwds: ArrayLike, -) -> None: ... 
+# +def savez(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ... -def savez_compressed( - file: str | os.PathLike[str] | _SupportsWrite[bytes], - *args: ArrayLike, - **kwds: ArrayLike, -) -> None: ... +# +def savez_compressed(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ... # File-like objects only have to implement `__iter__` and, # optionally, `encoding` @overload def loadtxt( - fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], - dtype: None = ..., - comments: None | str | Sequence[str] = ..., - delimiter: None | str = ..., - converters: None | Mapping[int | str, Callable[[str], Any]] = ..., - skiprows: int = ..., - usecols: int | Sequence[int] | None = ..., - unpack: bool = ..., - ndmin: L[0, 1, 2] = ..., - encoding: None | str = ..., - max_rows: None | int = ..., + fname: _FName, + dtype: None = None, + comments: str | Sequence[str] | None = "#", + delimiter: str | None = None, + converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, + skiprows: int = 0, + usecols: int | Sequence[int] | None = None, + unpack: bool = False, + ndmin: L[0, 1, 2] = 0, + encoding: str | None = None, + max_rows: int | None = None, *, - quotechar: None | str = ..., - like: None | _SupportsArrayFunc = ... -) -> NDArray[float64]: ... + quotechar: str | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[np.float64]: ... @overload def loadtxt( - fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], + fname: _FName, dtype: _DTypeLike[_SCT], - comments: None | str | Sequence[str] = ..., - delimiter: None | str = ..., - converters: None | Mapping[int | str, Callable[[str], Any]] = ..., - skiprows: int = ..., - usecols: int | Sequence[int] | None = ..., - unpack: bool = ..., - ndmin: L[0, 1, 2] = ..., - encoding: None | str = ..., - max_rows: None | int = ..., + comments: str | Sequence[str] | None = "#", + delimiter: str | None = None, + converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, + skiprows: int = 0, + usecols: int | Sequence[int] | None = None, + unpack: bool = False, + ndmin: L[0, 1, 2] = 0, + encoding: str | None = None, + max_rows: int | None = None, *, - quotechar: None | str = ..., - like: None | _SupportsArrayFunc = ... + quotechar: str | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[_SCT]: ... @overload def loadtxt( - fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], + fname: _FName, dtype: DTypeLike, - comments: None | str | Sequence[str] = ..., - delimiter: None | str = ..., - converters: None | Mapping[int | str, Callable[[str], Any]] = ..., - skiprows: int = ..., - usecols: int | Sequence[int] | None = ..., - unpack: bool = ..., - ndmin: L[0, 1, 2] = ..., - encoding: None | str = ..., - max_rows: None | int = ..., + comments: str | Sequence[str] | None = "#", + delimiter: str | None = None, + converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, + skiprows: int = 0, + usecols: int | Sequence[int] | None = None, + unpack: bool = False, + ndmin: L[0, 1, 2] = 0, + encoding: str | None = None, + max_rows: int | None = None, *, - quotechar: None | str = ..., - like: None | _SupportsArrayFunc = ... + quotechar: str | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... 
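A runtime sketch of the three `loadtxt` overloads above: the default dtype maps to `float64`, a concrete scalar type is propagated, and a loose `DTypeLike` falls through to `NDArray[Any]` (the type comments give the intended static results):

    import io
    import numpy as np

    a = np.loadtxt(io.StringIO("1 2\n3 4"))                  # NDArray[float64]
    b = np.loadtxt(io.StringIO("1 2\n3 4"), dtype=np.int64)  # NDArray[int64]
    c = np.loadtxt(io.StringIO("1 2\n3 4"), dtype="int64")   # NDArray[Any]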
def savetxt( - fname: str | os.PathLike[str] | _SupportsWrite[str] | _SupportsWrite[bytes], + fname: StrPath | _FNameWrite, X: ArrayLike, - fmt: str | Sequence[str] = ..., - delimiter: str = ..., - newline: str = ..., - header: str = ..., - footer: str = ..., - comments: str = ..., - encoding: None | str = ..., + fmt: str | Sequence[str] = "%.18e", + delimiter: str = " ", + newline: str = "\n", + header: str = "", + footer: str = "", + comments: str = "# ", + encoding: str | None = None, ) -> None: ... @overload def fromregex( - file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes], + file: _FNameRead, regexp: str | bytes | Pattern[Any], dtype: _DTypeLike[_SCT], - encoding: None | str = ... + encoding: str | None = None, ) -> NDArray[_SCT]: ... @overload def fromregex( - file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes], + file: _FNameRead, regexp: str | bytes | Pattern[Any], dtype: DTypeLike, - encoding: None | str = ... + encoding: str | None = None, ) -> NDArray[Any]: ... @overload def genfromtxt( - fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], - dtype: None = ..., + fname: _FName, + dtype: None = None, comments: str = ..., - delimiter: None | str | int | Iterable[int] = ..., + delimiter: str | int | Iterable[int] | None = ..., skip_header: int = ..., skip_footer: int = ..., - converters: None | Mapping[int | str, Callable[[str], Any]] = ..., + converters: Mapping[int | str, Callable[[str], Any]] | None = ..., missing_values: Any = ..., filling_values: Any = ..., - usecols: None | Sequence[int] = ..., - names: L[None, True] | str | Collection[str] = ..., - excludelist: None | Sequence[str] = ..., + usecols: Sequence[int] | None = ..., + names: L[True] | str | Collection[str] | None = ..., + excludelist: Sequence[str] | None = ..., deletechars: str = ..., replace_space: str = ..., autostrip: bool = ..., - case_sensitive: bool | L['upper', 'lower'] = ..., + case_sensitive: bool | L["upper", "lower"] = ..., defaultfmt: str = ..., - unpack: None | bool = ..., + unpack: bool | None = ..., usemask: bool = ..., loose: bool = ..., invalid_raise: bool = ..., - max_rows: None | int = ..., + max_rows: int | None = ..., encoding: str = ..., *, ndmin: L[0, 1, 2] = ..., - like: None | _SupportsArrayFunc = ..., + like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... 
@overload def genfromtxt( - fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], + fname: _FName, dtype: _DTypeLike[_SCT], comments: str = ..., - delimiter: None | str | int | Iterable[int] = ..., + delimiter: str | int | Iterable[int] | None = ..., skip_header: int = ..., skip_footer: int = ..., - converters: None | Mapping[int | str, Callable[[str], Any]] = ..., + converters: Mapping[int | str, Callable[[str], Any]] | None = ..., missing_values: Any = ..., filling_values: Any = ..., - usecols: None | Sequence[int] = ..., - names: L[None, True] | str | Collection[str] = ..., - excludelist: None | Sequence[str] = ..., + usecols: Sequence[int] | None = ..., + names: L[True] | str | Collection[str] | None = ..., + excludelist: Sequence[str] | None = ..., deletechars: str = ..., replace_space: str = ..., autostrip: bool = ..., - case_sensitive: bool | L['upper', 'lower'] = ..., + case_sensitive: bool | L["upper", "lower"] = ..., defaultfmt: str = ..., - unpack: None | bool = ..., + unpack: bool | None = ..., usemask: bool = ..., loose: bool = ..., invalid_raise: bool = ..., - max_rows: None | int = ..., + max_rows: int | None = ..., encoding: str = ..., *, ndmin: L[0, 1, 2] = ..., - like: None | _SupportsArrayFunc = ..., + like: _SupportsArrayFunc | None = ..., ) -> NDArray[_SCT]: ... @overload def genfromtxt( - fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], + fname: _FName, dtype: DTypeLike, comments: str = ..., - delimiter: None | str | int | Iterable[int] = ..., + delimiter: str | int | Iterable[int] | None = ..., skip_header: int = ..., skip_footer: int = ..., - converters: None | Mapping[int | str, Callable[[str], Any]] = ..., + converters: Mapping[int | str, Callable[[str], Any]] | None = ..., missing_values: Any = ..., filling_values: Any = ..., - usecols: None | Sequence[int] = ..., - names: L[None, True] | str | Collection[str] = ..., - excludelist: None | Sequence[str] = ..., + usecols: Sequence[int] | None = ..., + names: L[True] | str | Collection[str] | None = ..., + excludelist: Sequence[str] | None = ..., deletechars: str = ..., replace_space: str = ..., autostrip: bool = ..., - case_sensitive: bool | L['upper', 'lower'] = ..., + case_sensitive: bool | L["upper", "lower"] = ..., defaultfmt: str = ..., - unpack: None | bool = ..., + unpack: bool | None = ..., usemask: bool = ..., loose: bool = ..., invalid_raise: bool = ..., - max_rows: None | int = ..., + max_rows: int | None = ..., encoding: str = ..., *, ndmin: L[0, 1, 2] = ..., - like: None | _SupportsArrayFunc = ..., + like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... @overload -def recfromtxt( - fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], - *, - usemask: L[False] = ..., - **kwargs: Any, -) -> recarray[Any, dtype[record]]: ... +def recfromtxt(fname: _FName, *, usemask: L[False] = False, **kwargs: object) -> np.recarray[Any, np.dtype[np.record]]: ... @overload -def recfromtxt( - fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], - *, - usemask: L[True], - **kwargs: Any, -) -> MaskedRecords[Any, dtype[void]]: ... +def recfromtxt(fname: _FName, *, usemask: L[True], **kwargs: object) -> MaskedRecords[Any, np.dtype[np.void]]: ... @overload -def recfromcsv( - fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], - *, - usemask: L[False] = ..., - **kwargs: Any, -) -> recarray[Any, dtype[record]]: ... +def recfromcsv(fname: _FName, *, usemask: L[False] = False, **kwargs: object) -> np.recarray[Any, np.dtype[np.record]]: ... 
@overload -def recfromcsv( - fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes], - *, - usemask: L[True], - **kwargs: Any, -) -> MaskedRecords[Any, dtype[void]]: ... +def recfromcsv(fname: _FName, *, usemask: L[True], **kwargs: object) -> MaskedRecords[Any, np.dtype[np.void]]: ... diff --git a/numpy/lib/_polynomial_impl.pyi b/numpy/lib/_polynomial_impl.pyi index 123f32049939..112ec33d2520 100644 --- a/numpy/lib/_polynomial_impl.pyi +++ b/numpy/lib/_polynomial_impl.pyi @@ -1,5 +1,6 @@ from typing import ( Literal as L, + TypeAlias, overload, Any, SupportsInt, @@ -10,7 +11,7 @@ from typing import ( import numpy as np from numpy import ( - poly1d as poly1d, + poly1d, unsignedinteger, signedinteger, floating, @@ -35,8 +36,8 @@ from numpy._typing import ( _T = TypeVar("_T") -_2Tup = tuple[_T, _T] -_5Tup = tuple[ +_2Tup: TypeAlias = tuple[_T, _T] +_5Tup: TypeAlias = tuple[ _T, NDArray[float64], NDArray[int32], @@ -44,7 +45,19 @@ _5Tup = tuple[ NDArray[float64], ] -__all__: list[str] +__all__ = [ + "poly", + "roots", + "polyint", + "polyder", + "polyadd", + "polysub", + "polymul", + "polydiv", + "polyval", + "poly1d", + "polyfit", +] def poly(seq_of_zeros: ArrayLike) -> NDArray[floating[Any]]: ... diff --git a/numpy/lib/_scimath_impl.py b/numpy/lib/_scimath_impl.py index d5492c645247..68e9cd2d5337 100644 --- a/numpy/lib/_scimath_impl.py +++ b/numpy/lib/_scimath_impl.py @@ -13,27 +13,11 @@ Similarly, `sqrt`, other base logarithms, `power` and trig functions are correctly handled. See their respective docstrings for specific examples. -Functions ---------- - -.. autosummary:: - :toctree: generated/ - - sqrt - log - log2 - logn - log10 - power - arccos - arcsin - arctanh - """ import numpy._core.numeric as nx import numpy._core.numerictypes as nt from numpy._core.numeric import asarray, any -from numpy._core.overrides import array_function_dispatch +from numpy._core.overrides import array_function_dispatch, set_module from numpy.lib._type_check_impl import isreal @@ -199,6 +183,7 @@ def _unary_dispatcher(x): return (x,) +@set_module('numpy.lib.scimath') @array_function_dispatch(_unary_dispatcher) def sqrt(x): """ @@ -254,6 +239,7 @@ def sqrt(x): return nx.sqrt(x) +@set_module('numpy.lib.scimath') @array_function_dispatch(_unary_dispatcher) def log(x): """ @@ -303,6 +289,7 @@ def log(x): return nx.log(x) +@set_module('numpy.lib.scimath') @array_function_dispatch(_unary_dispatcher) def log10(x): """ @@ -358,6 +345,7 @@ def _logn_dispatcher(n, x): return (n, x,) +@set_module('numpy.lib.scimath') @array_function_dispatch(_logn_dispatcher) def logn(n, x): """ @@ -395,6 +383,7 @@ def logn(n, x): return nx.log(x)/nx.log(n) +@set_module('numpy.lib.scimath') @array_function_dispatch(_unary_dispatcher) def log2(x): """ @@ -448,6 +437,7 @@ def _power_dispatcher(x, p): return (x, p) +@set_module('numpy.lib.scimath') @array_function_dispatch(_power_dispatcher) def power(x, p): """ @@ -502,6 +492,7 @@ def power(x, p): return nx.power(x, p) +@set_module('numpy.lib.scimath') @array_function_dispatch(_unary_dispatcher) def arccos(x): """ @@ -548,6 +539,7 @@ def arccos(x): return nx.arccos(x) +@set_module('numpy.lib.scimath') @array_function_dispatch(_unary_dispatcher) def arcsin(x): """ @@ -595,6 +587,7 @@ def arcsin(x): return nx.arcsin(x) +@set_module('numpy.lib.scimath') @array_function_dispatch(_unary_dispatcher) def arctanh(x): """ diff --git a/numpy/lib/_scimath_impl.pyi b/numpy/lib/_scimath_impl.pyi index 589feb15f8ff..43b7110b2923 100644 --- a/numpy/lib/_scimath_impl.pyi +++ 
b/numpy/lib/_scimath_impl.pyi @@ -10,7 +10,7 @@ from numpy._typing import ( _FloatLike_co, ) -__all__: list[str] +__all__ = ["sqrt", "log", "log2", "logn", "log10", "power", "arccos", "arcsin", "arctanh"] @overload def sqrt(x: _FloatLike_co) -> Any: ... diff --git a/numpy/lib/_shape_base_impl.py b/numpy/lib/_shape_base_impl.py index 3e2f2ba7d46c..7d861bb6f2e0 100644 --- a/numpy/lib/_shape_base_impl.py +++ b/numpy/lib/_shape_base_impl.py @@ -66,8 +66,6 @@ def take_along_axis(arr, indices, axis): Functions returning an index along an axis, like `argsort` and `argpartition`, produce suitable indices for this function. - .. versionadded:: 1.15.0 - Parameters ---------- arr : ndarray (Ni..., M, Nk...) @@ -193,8 +191,6 @@ def put_along_axis(arr, indices, values, axis): Functions returning an index along an axis, like `argsort` and `argpartition`, produce suitable indices for this function. - .. versionadded:: 1.15.0 - Parameters ---------- arr : ndarray (Ni..., M, Nk...) @@ -315,9 +311,6 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs): kwargs : any Additional named arguments to `func1d`. - .. versionadded:: 1.9.0 - - Returns ------- out : ndarray (Ni..., Nj..., Nk...) @@ -535,11 +528,6 @@ def expand_dims(a, axis): ``axis == a.ndim``, and passing ``axis < -a.ndim - 1`` will be treated as ``axis == 0``. This behavior is deprecated. - .. versionchanged:: 1.18.0 - A tuple of axes is now supported. Out of range axes as - described above are now forbidden and raise an - `~exceptions.AxisError`. - Returns ------- result : ndarray diff --git a/numpy/lib/_shape_base_impl.pyi b/numpy/lib/_shape_base_impl.pyi index c765e1e5edf5..5439c533edff 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -7,6 +7,7 @@ from typing import ( Protocol, ParamSpec, Concatenate, + type_check_only, ) import numpy as np @@ -20,7 +21,7 @@ from numpy import ( complexfloating, object_, ) - +from numpy._core.shape_base import vstack as row_stack from numpy._typing import ( ArrayLike, NDArray, @@ -34,12 +35,29 @@ from numpy._typing import ( _ArrayLikeObject_co, ) -from numpy._core.shape_base import vstack +__all__ = [ + "column_stack", + "row_stack", + "dstack", + "array_split", + "split", + "hsplit", + "vsplit", + "dsplit", + "apply_over_axes", + "expand_dims", + "apply_along_axis", + "kron", + "tile", + "take_along_axis", + "put_along_axis", +] _P = ParamSpec("_P") _SCT = TypeVar("_SCT", bound=generic) # Signature of `__array_wrap__` +@type_check_only class _ArrayWrap(Protocol): def __call__( self, @@ -49,13 +67,11 @@ class _ArrayWrap(Protocol): /, ) -> Any: ... +@type_check_only class _SupportsArrayWrap(Protocol): @property def __array_wrap__(self) -> _ArrayWrap: ... - -__all__: list[str] - def take_along_axis( arr: _SCT | NDArray[_SCT], indices: NDArray[integer[Any]], @@ -79,7 +95,7 @@ def apply_along_axis( ) -> NDArray[_SCT]: ... @overload def apply_along_axis( - func1d: Callable[Concatenate[NDArray[Any], _P], ArrayLike], + func1d: Callable[Concatenate[NDArray[Any], _P], Any], axis: SupportsIndex, arr: ArrayLike, *args: _P.args, diff --git a/numpy/lib/_stride_tricks_impl.py b/numpy/lib/_stride_tricks_impl.py index def62523ee0e..d4780783a638 100644 --- a/numpy/lib/_stride_tricks_impl.py +++ b/numpy/lib/_stride_tricks_impl.py @@ -3,12 +3,6 @@ An explanation of strides can be found in the :ref:`arrays.ndarray`. -Functions ---------- - -.. 
autosummary:: - :toctree: generated/ - """ import numpy as np from numpy._core.numeric import normalize_axis_tuple @@ -56,12 +50,8 @@ def as_strided(x, shape=None, strides=None, subok=False, writeable=True): strides : sequence of int, optional The strides of the new array. Defaults to ``x.strides``. subok : bool, optional - .. versionadded:: 1.10 - If True, subclasses are preserved. writeable : bool, optional - .. versionadded:: 1.12 - If set to False, the returned array will always be readonly. Otherwise it will be writable if the original array was. It is advisable to set this to False if possible (see Notes). @@ -408,10 +398,6 @@ def broadcast_to(array, shape, subok=False): broadcast_arrays broadcast_shapes - Notes - ----- - .. versionadded:: 1.10.0 - Examples -------- >>> import numpy as np diff --git a/numpy/lib/_stride_tricks_impl.pyi b/numpy/lib/_stride_tricks_impl.pyi index cf635f1fb640..e2284115eeb4 100644 --- a/numpy/lib/_stride_tricks_impl.pyi +++ b/numpy/lib/_stride_tricks_impl.pyi @@ -10,9 +10,9 @@ from numpy._typing import ( _ArrayLike ) -_SCT = TypeVar("_SCT", bound=generic) +__all__ = ["broadcast_to", "broadcast_arrays", "broadcast_shapes"] -__all__: list[str] +_SCT = TypeVar("_SCT", bound=generic) class DummyArray: __array_interface__: dict[str, Any] diff --git a/numpy/lib/_twodim_base_impl.py b/numpy/lib/_twodim_base_impl.py index 584efbfc307e..e8815bede891 100644 --- a/numpy/lib/_twodim_base_impl.py +++ b/numpy/lib/_twodim_base_impl.py @@ -10,7 +10,7 @@ asarray, where, int8, int16, int32, int64, intp, empty, promote_types, diagonal, nonzero, indices ) -from numpy._core.overrides import set_array_function_like_doc, set_module +from numpy._core.overrides import finalize_array_function_like, set_module from numpy._core import overrides from numpy._core import iinfo from numpy.lib._stride_tricks_impl import broadcast_to @@ -160,7 +160,7 @@ def flipud(m): return m[::-1, ...] -@set_array_function_like_doc +@finalize_array_function_like @set_module('numpy') def eye(N, M=None, k=0, dtype=float, order='C', *, device=None, like=None): """ @@ -181,8 +181,6 @@ def eye(N, M=None, k=0, dtype=float, order='C', *, device=None, like=None): order : {'C', 'F'}, optional Whether the output should be stored in row-major (C-style) or column-major (Fortran-style) order in memory. - - .. versionadded:: 1.14.0 device : str, optional The device on which to place the created array. Default: None. For Array-API interoperability only, so must be ``"cpu"`` if passed. @@ -375,7 +373,7 @@ def diagflat(v, k=0): return conv.wrap(res) -@set_array_function_like_doc +@finalize_array_function_like @set_module('numpy') def tri(N, M=None, k=0, dtype=float, *, like=None): """ @@ -569,8 +567,6 @@ def vander(x, N=None, increasing=False): Order of the powers of the columns. If True, the powers increase from left to right, if False (the default) they are reversed. - .. versionadded:: 1.9.0 - Returns ------- out : ndarray @@ -860,10 +856,6 @@ def mask_indices(n, mask_func, k=0): -------- triu, tril, triu_indices, tril_indices - Notes - ----- - .. versionadded:: 1.4.0 - Examples -------- >>> import numpy as np @@ -912,8 +904,6 @@ def tril_indices(n, k=0, m=None): k : int, optional Diagonal offset (see `tril` for details). m : int, optional - .. versionadded:: 1.9.0 - The column dimension of the arrays for which the returned arrays will be valid. By default `m` is taken equal to `n`. @@ -922,8 +912,9 @@ def tril_indices(n, k=0, m=None): Returns ------- inds : tuple of arrays - The indices for the triangle. 
The returned tuple contains two arrays, - each with the indices along one dimension of the array. + The row and column indices, respectively. The row indices are sorted + in non-decreasing order, and the corresponding column indices are + strictly increasing for each row. See also -------- @@ -931,10 +922,6 @@ mask_indices : generic function accepting an arbitrary mask function. tril, triu - Notes - ----- - .. versionadded:: 1.4.0 - Examples -------- >>> import numpy as np @@ -944,8 +931,11 @@ diagonals further right: >>> il1 = np.tril_indices(4) - >>> il2 = np.tril_indices(4, 2) + >>> il1 + (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3])) + Note that row indices (first array) are non-decreasing, and the corresponding + column indices (second array) are strictly increasing for each row. Here is how they can be used with a sample array: >>> a = np.arange(16).reshape(4, 4) @@ -971,6 +961,7 @@ These cover almost the whole array (two diagonals right of the main one): + >>> il2 = np.tril_indices(4, 2) >>> a[il2] = -10 >>> a array([[-10, -10, -10, 3], @@ -1041,11 +1032,6 @@ See Also -------- tril_indices, tril, triu_indices_from - - Notes - ----- - .. versionadded:: 1.4.0 - """ if arr.ndim != 2: raise ValueError("input array must be 2-d") @@ -1065,8 +1051,6 @@ k : int, optional Diagonal offset (see `triu` for details). m : int, optional - .. versionadded:: 1.9.0 - The column dimension of the arrays for which the returned arrays will be valid. By default `m` is taken equal to `n`. @@ -1075,9 +1059,9 @@ Returns ------- inds : tuple, shape(2) of ndarrays, shape(`n`) - The indices for the triangle. The returned tuple contains two arrays, - each with the indices along one dimension of the array. Can be used - to slice a ndarray of shape(`n`, `n`). + The row and column indices, respectively. The row indices are sorted + in non-decreasing order, and the corresponding column indices are + strictly increasing for each row. See also -------- @@ -1085,10 +1069,6 @@ mask_indices : generic function accepting an arbitrary mask function. triu, tril - Notes - ----- - .. versionadded:: 1.4.0 - Examples -------- >>> import numpy as np @@ -1098,7 +1078,11 @@ diagonals further right: >>> iu1 = np.triu_indices(4) - >>> iu2 = np.triu_indices(4, 2) + >>> iu1 + (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3])) + + Note that row indices (first array) are non-decreasing, and the corresponding + column indices (second array) are strictly increasing for each row. Here is how they can be used with a sample array: @@ -1126,6 +1110,7 @@ These cover only a small part of the whole array (two diagonals right of the main one): + >>> iu2 = np.triu_indices(4, 2) >>> a[iu2] = -10 >>> a array([[ -1, -1, -10, -10], @@ -1197,11 +1182,6 @@ See Also -------- triu_indices, triu, tril_indices_from - - Notes - ----- - ..
versionadded:: 1.4.0 - """ if arr.ndim != 2: raise ValueError("input array must be 2-d") diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index 4096976871d7..e748e91fb908 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -1,7 +1,7 @@ -import builtins from collections.abc import Callable, Sequence from typing import ( Any, + TypeAlias, overload, TypeVar, Literal as L, @@ -16,6 +16,7 @@ from numpy import ( int_, intp, float64, + complex128, signedinteger, floating, complexfloating, @@ -29,6 +30,7 @@ from numpy._typing import ( ArrayLike, _ArrayLike, NDArray, + _SupportsArray, _SupportsArrayFunc, _ArrayLikeInt_co, _ArrayLikeFloat_co, @@ -36,17 +38,33 @@ from numpy._typing import ( _ArrayLikeObject_co, ) +__all__ = [ + "diag", + "diagflat", + "eye", + "fliplr", + "flipud", + "tri", + "triu", + "tril", + "vander", + "histogram2d", + "mask_indices", + "tril_indices", + "tril_indices_from", + "triu_indices", + "triu_indices_from", +] + _T = TypeVar("_T") _SCT = TypeVar("_SCT", bound=generic) # The returned arrays dtype must be compatible with `np.equal` -_MaskFunc = Callable[ +_MaskFunc: TypeAlias = Callable[ [NDArray[int_], _T], NDArray[number[Any] | np.bool | timedelta64 | datetime64 | object_], ] -__all__: list[str] - @overload def fliplr(m: _ArrayLike[_SCT]) -> NDArray[_SCT]: ... @overload @@ -164,44 +182,220 @@ def vander( increasing: bool = ..., ) -> NDArray[object_]: ... + +_Int_co: TypeAlias = np.integer[Any] | np.bool +_Float_co: TypeAlias = np.floating[Any] | _Int_co +_Number_co: TypeAlias = np.number[Any] | np.bool + +_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_SCT]] | Sequence[_SCT] +_ArrayLike2D: TypeAlias = ( + _SupportsArray[np.dtype[_SCT]] + | Sequence[_ArrayLike1D[_SCT]] +) + +_ArrayLike1DInt_co: TypeAlias = ( + _SupportsArray[np.dtype[_Int_co]] + | Sequence[int | _Int_co] +) +_ArrayLike1DFloat_co: TypeAlias = ( + _SupportsArray[np.dtype[_Float_co]] + | Sequence[float | int | _Float_co] +) +_ArrayLike2DFloat_co: TypeAlias = ( + _SupportsArray[np.dtype[_Float_co]] + | Sequence[_ArrayLike1DFloat_co] +) +_ArrayLike1DNumber_co: TypeAlias = ( + _SupportsArray[np.dtype[_Number_co]] + | Sequence[int | float | complex | _Number_co] +) + +_SCT_complex = TypeVar("_SCT_complex", bound=np.complexfloating[Any, Any]) +_SCT_inexact = TypeVar("_SCT_inexact", bound=np.inexact[Any]) +_SCT_number_co = TypeVar("_SCT_number_co", bound=_Number_co) + @overload -def histogram2d( # type: ignore[misc] - x: _ArrayLikeFloat_co, - y: _ArrayLikeFloat_co, +def histogram2d( + x: _ArrayLike1D[_SCT_complex], + y: _ArrayLike1D[_SCT_complex | _Float_co], bins: int | Sequence[int] = ..., - range: None | _ArrayLikeFloat_co = ..., + range: None | _ArrayLike2DFloat_co = ..., density: None | bool = ..., - weights: None | _ArrayLikeFloat_co = ..., + weights: None | _ArrayLike1DFloat_co = ..., ) -> tuple[ NDArray[float64], - NDArray[floating[Any]], - NDArray[floating[Any]], + NDArray[_SCT_complex], + NDArray[_SCT_complex], ]: ... 
@overload def histogram2d( - x: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co, + x: _ArrayLike1D[_SCT_complex | _Float_co], + y: _ArrayLike1D[_SCT_complex], bins: int | Sequence[int] = ..., - range: None | _ArrayLikeFloat_co = ..., + range: None | _ArrayLike2DFloat_co = ..., density: None | bool = ..., - weights: None | _ArrayLikeFloat_co = ..., + weights: None | _ArrayLike1DFloat_co = ..., ) -> tuple[ NDArray[float64], - NDArray[complexfloating[Any, Any]], - NDArray[complexfloating[Any, Any]], + NDArray[_SCT_complex], + NDArray[_SCT_complex], ]: ... -@overload # TODO: Sort out `bins` +@overload def histogram2d( - x: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co, - bins: Sequence[_ArrayLikeInt_co], - range: None | _ArrayLikeFloat_co = ..., + x: _ArrayLike1D[_SCT_inexact], + y: _ArrayLike1D[_SCT_inexact | _Int_co], + bins: int | Sequence[int] = ..., + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_inexact], + NDArray[_SCT_inexact], +]: ... +@overload +def histogram2d( + x: _ArrayLike1D[_SCT_inexact | _Int_co], + y: _ArrayLike1D[_SCT_inexact], + bins: int | Sequence[int] = ..., + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_inexact], + NDArray[_SCT_inexact], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DInt_co | Sequence[float | int], + y: _ArrayLike1DInt_co | Sequence[float | int], + bins: int | Sequence[int] = ..., + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[float64], + NDArray[float64], +]: ... +@overload +def histogram2d( + x: Sequence[complex | float | int], + y: Sequence[complex | float | int], + bins: int | Sequence[int] = ..., + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[complex128 | float64], + NDArray[complex128 | float64], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: _ArrayLike1D[_SCT_number_co] | Sequence[_ArrayLike1D[_SCT_number_co]], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_number_co], + NDArray[_SCT_number_co], +]: ... +@overload +def histogram2d( + x: _ArrayLike1D[_SCT_inexact], + y: _ArrayLike1D[_SCT_inexact], + bins: Sequence[_ArrayLike1D[_SCT_number_co] | int], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_number_co | _SCT_inexact], + NDArray[_SCT_number_co | _SCT_inexact], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DInt_co | Sequence[float | int], + y: _ArrayLike1DInt_co | Sequence[float | int], + bins: Sequence[_ArrayLike1D[_SCT_number_co] | int], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_number_co | float64], + NDArray[_SCT_number_co | float64], +]: ... 
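Likewise illustrative only: when explicit bin edges are supplied, `histogram2d` hands those edges back, so the edge dtype tracks the `bins` argument rather than the samples, which is the case the `_SCT_number_co` overloads encode:

>>> import numpy as np
>>> x = y = np.arange(5)
>>> edges = np.array([0, 2, 4], dtype=np.int64)
>>> hist, xe, ye = np.histogram2d(x, y, bins=edges)
>>> xe.dtype, ye.dtype
(dtype('int64'), dtype('int64'))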
+@overload +def histogram2d( + x: Sequence[complex | float | int], + y: Sequence[complex | float | int], + bins: Sequence[_ArrayLike1D[_SCT_number_co] | int], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_number_co | complex128 | float64], + NDArray[_SCT_number_co | complex128 | float64], +]: ... + +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[bool]], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[np.bool], + NDArray[np.bool], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[int | bool]], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[np.int_ | np.bool], + NDArray[np.int_ | np.bool], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[float | int | bool]], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[np.float64 | np.int_ | np.bool], + NDArray[np.float64 | np.int_ | np.bool], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[complex | float | int | bool]], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., - weights: None | _ArrayLikeFloat_co = ..., + weights: None | _ArrayLike1DFloat_co = ..., ) -> tuple[ NDArray[float64], - NDArray[Any], - NDArray[Any], + NDArray[np.complex128 | np.float64 | np.int_ | np.bool], + NDArray[np.complex128 | np.float64 | np.int_ | np.bool], ]: ... # NOTE: we're assuming/demanding here the `mask_func` returns diff --git a/numpy/lib/_type_check_impl.py b/numpy/lib/_type_check_impl.py index 5f662f6eb34e..e5c9ffbbb8d4 100644 --- a/numpy/lib/_type_check_impl.py +++ b/numpy/lib/_type_check_impl.py @@ -398,28 +398,18 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): in-place (False). The in-place operation only occurs if casting to an array does not require a copy. Default is True. - - .. versionadded:: 1.13 nan : int, float, optional Value to be used to fill NaN values. If no value is passed then NaN values will be replaced with 0.0. - - .. versionadded:: 1.17 posinf : int, float, optional Value to be used to fill positive infinity values. If no value is passed then positive infinity values will be replaced with a very large number. - - .. versionadded:: 1.17 neginf : int, float, optional Value to be used to fill negative infinity values. If no value is passed then negative infinity values will be replaced with a very small (or negative) number. - ..
versionadded:: 1.17 - - - Returns ------- out : ndarray diff --git a/numpy/lib/_type_check_impl.pyi b/numpy/lib/_type_check_impl.pyi index 6cc5073b8e20..e195238103fa 100644 --- a/numpy/lib/_type_check_impl.pyi +++ b/numpy/lib/_type_check_impl.pyi @@ -1,49 +1,46 @@ from collections.abc import Container, Iterable -from typing import ( - Literal as L, - Any, - overload, - TypeVar, - Protocol, -) +from typing import Literal as L, Any, overload, TypeVar import numpy as np from numpy import ( + _HasRealAndImag, dtype, generic, floating, - float64, complexfloating, integer, ) from numpy._typing import ( ArrayLike, - DTypeLike, NBitBase, NDArray, _64Bit, _SupportsDType, _ScalarLike_co, _ArrayLike, - _DTypeLikeComplex, ) +__all__ = [ + "iscomplexobj", + "isrealobj", + "imag", + "iscomplex", + "isreal", + "nan_to_num", + "real", + "real_if_close", + "typename", + "mintypecode", + "common_type", +] + _T = TypeVar("_T") _T_co = TypeVar("_T_co", covariant=True) _SCT = TypeVar("_SCT", bound=generic) _NBit1 = TypeVar("_NBit1", bound=NBitBase) _NBit2 = TypeVar("_NBit2", bound=NBitBase) -class _SupportsReal(Protocol[_T_co]): - @property - def real(self) -> _T_co: ... - -class _SupportsImag(Protocol[_T_co]): - @property - def imag(self) -> _T_co: ... - -__all__: list[str] def mintypecode( typechars: Iterable[str | ArrayLike], @@ -52,12 +49,12 @@ def mintypecode( ) -> str: ... @overload -def real(val: _SupportsReal[_T]) -> _T: ... +def real(val: _HasRealAndImag[_T, Any]) -> _T: ... @overload def real(val: ArrayLike) -> NDArray[Any]: ... @overload -def imag(val: _SupportsImag[_T]) -> _T: ... +def imag(val: _HasRealAndImag[Any, _T]) -> _T: ... @overload def imag(val: ArrayLike) -> NDArray[Any]: ... diff --git a/numpy/lib/_ufunclike_impl.py b/numpy/lib/_ufunclike_impl.py index 3fc5a32d33a6..695aab1b8922 100644 --- a/numpy/lib/_ufunclike_impl.py +++ b/numpy/lib/_ufunclike_impl.py @@ -7,8 +7,6 @@ import numpy._core.numeric as nx from numpy._core.overrides import array_function_dispatch -import warnings -import functools def _dispatcher(x, out=None): @@ -21,12 +19,12 @@ def fix(x, out=None): Round to nearest integer towards zero. Round an array of floats element-wise to nearest integer towards zero. - The rounded values are returned as floats. + The rounded values have the same data-type as the input. Parameters ---------- x : array_like - An array of floats to be rounded + An array to be rounded out : ndarray, optional A location into which the result is stored. If provided, it must have a shape that the input broadcasts to. If not provided or None, a @@ -35,12 +33,12 @@ def fix(x, out=None): Returns ------- out : ndarray of floats - A float array with the same dimensions as the input. - If second argument is not supplied then a float array is returned + An array with the same dimensions and data-type as the input. + If second argument is not supplied then a new array is returned with the rounded values. If a second argument is supplied the result is stored there. - The return value `out` is then a reference to that array. + The return value ``out`` is then a reference to that array. 
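The docstring change above reflects that `fix` now preserves the input dtype (recent NumPy passes integer inputs through `ceil`/`floor` unchanged). A tiny sketch of that behavior, illustrative only:

>>> import numpy as np
>>> np.fix(np.array([2.1, -2.9], dtype=np.float32)).dtype
dtype('float32')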
See Also -------- @@ -53,7 +51,7 @@ def fix(x, out=None): >>> np.fix(3.14) 3.0 >>> np.fix(3) - 3.0 + 3 >>> np.fix([2.1, 2.9, -2.1, -2.9]) array([ 2., 2., -2., -2.]) diff --git a/numpy/lib/_ufunclike_impl.pyi b/numpy/lib/_ufunclike_impl.pyi index dd927bc62158..8d87ae8bf4c6 100644 --- a/numpy/lib/_ufunclike_impl.pyi +++ b/numpy/lib/_ufunclike_impl.pyi @@ -9,9 +9,9 @@ from numpy._typing import ( _ArrayLikeObject_co, ) -_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) +__all__ = ["fix", "isneginf", "isposinf"] -__all__: list[str] +_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) @overload def fix( # type: ignore[misc] diff --git a/numpy/lib/_user_array_impl.py b/numpy/lib/_user_array_impl.py index c26fa4435e92..cae6e0556687 100644 --- a/numpy/lib/_user_array_impl.py +++ b/numpy/lib/_user_array_impl.py @@ -13,8 +13,10 @@ bitwise_xor, invert, less, less_equal, not_equal, equal, greater, greater_equal, shape, reshape, arange, sin, sqrt, transpose ) +from numpy._core.overrides import set_module +@set_module("numpy.lib.user_array") class container: """ container(data, dtype=None, copy=True) diff --git a/numpy/lib/_user_array_impl.pyi b/numpy/lib/_user_array_impl.pyi new file mode 100644 index 000000000000..d5dfb0573c71 --- /dev/null +++ b/numpy/lib/_user_array_impl.pyi @@ -0,0 +1,220 @@ +from types import EllipsisType +from typing import Any, Generic, SupportsIndex, TypeAlias, TypeVar, overload + +from _typeshed import Incomplete +from typing_extensions import Self, deprecated, override + +import numpy as np +import numpy.typing as npt +from numpy._typing import _ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co, _DTypeLike + +### + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) +_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=Any, covariant=True) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype[Any]) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype[Any], default=np.dtype[Any], covariant=True) + +_BoolArrayT = TypeVar("_BoolArrayT", bound=container[Any, np.dtype[np.bool]]) +_IntegralArrayT = TypeVar("_IntegralArrayT", bound=container[Any, np.dtype[np.bool | np.integer | np.object_]]) +_RealContainerT = TypeVar( + "_RealContainerT", + bound=container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]], +) +_NumericContainerT = TypeVar("_NumericContainerT", bound=container[Any, np.dtype[np.number | np.timedelta64 | np.object_]]) + +_ArrayInt_co: TypeAlias = npt.NDArray[np.integer | np.bool] + +_ToIndexSlice: TypeAlias = slice | EllipsisType | _ArrayInt_co | None +_ToIndexSlices: TypeAlias = _ToIndexSlice | tuple[_ToIndexSlice, ...] +_ToIndex: TypeAlias = SupportsIndex | _ToIndexSlice +_ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...] + +### + +class container(Generic[_ShapeT_co, _DTypeT_co]): + array: np.ndarray[_ShapeT_co, _DTypeT_co] + + @overload + def __init__( + self, + /, + data: container[_ShapeT_co, _DTypeT_co] | np.ndarray[_ShapeT_co, _DTypeT_co], + dtype: None = None, + copy: bool = True, + ) -> None: ... + @overload + def __init__( + self: container[Any, np.dtype[_ScalarT]], + /, + data: _ArrayLike[_ScalarT], + dtype: None = None, + copy: bool = True, + ) -> None: ... + @overload + def __init__( + self: container[Any, np.dtype[_ScalarT]], + /, + data: npt.ArrayLike, + dtype: _DTypeLike[_ScalarT], + copy: bool = True, + ) -> None: ... + @overload + def __init__(self, /, data: npt.ArrayLike, dtype: npt.DTypeLike | None = None, copy: bool = True) -> None: ... 
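A small usage sketch of the constructor overloads above (illustrative; the import path `numpy.lib.user_array` follows from the `set_module` call in the .py change): the dtype is inferred from `data` unless given explicitly.

>>> import numpy as np
>>> from numpy.lib.user_array import container
>>> container([1.0, 2.0]).array.dtype
dtype('float64')
>>> container([1, 2], dtype=np.float32).array.dtype
dtype('float32')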
+ + # + def __complex__(self, /) -> complex: ... + def __float__(self, /) -> float: ... + def __int__(self, /) -> int: ... + def __hex__(self, /) -> str: ... + def __oct__(self, /) -> str: ... + + # + @override + def __eq__(self, other: object, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + @override + def __ne__(self, other: object, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + + # + def __lt__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + def __le__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + def __gt__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + def __ge__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + + # + def __len__(self, /) -> int: ... + + # keep in sync with np.ndarray + @overload + def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> container[_ShapeT_co, _DTypeT_co]: ... + @overload + def __getitem__(self, key: _ToIndexSlices, /) -> container[Any, _DTypeT_co]: ... + @overload + def __getitem__(self, key: _ToIndices, /) -> Any: ... + @overload + def __getitem__(self: container[Any, np.dtype[np.void]], key: list[str], /) -> container[_ShapeT_co, np.dtype[np.void]]: ... + @overload + def __getitem__(self: container[Any, np.dtype[np.void]], key: str, /) -> container[_ShapeT_co, np.dtype[Any]]: ... + + # keep in sync with np.ndarray + @overload + def __setitem__(self, index: _ToIndices, value: object, /) -> None: ... + @overload + def __setitem__(self: container[Any, np.dtype[np.void]], key: str | list[str], value: object, /) -> None: ... + + # keep in sync with np.ndarray + @overload + def __abs__(self: container[_ShapeT, np.dtype[np.complex64]], /) -> container[_ShapeT, np.dtype[np.float32]]: ... # type: ignore[overload-overlap] + @overload + def __abs__(self: container[_ShapeT, np.dtype[np.complex128]], /) -> container[_ShapeT, np.dtype[np.float64]]: ... + @overload + def __abs__(self: container[_ShapeT, np.dtype[np.complex192]], /) -> container[_ShapeT, np.dtype[np.float96]]: ... + @overload + def __abs__(self: container[_ShapeT, np.dtype[np.complex256]], /) -> container[_ShapeT, np.dtype[np.float128]]: ... + @overload + def __abs__(self: _RealContainerT, /) -> _RealContainerT: ... + + # + def __neg__(self: _NumericContainerT, /) -> _NumericContainerT: ... # noqa: PYI019 + def __pos__(self: _NumericContainerT, /) -> _NumericContainerT: ... # noqa: PYI019 + def __invert__(self: _IntegralArrayT, /) -> _IntegralArrayT: ... # noqa: PYI019 + + # TODO(jorenham): complete these binary ops + + # + def __add__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __radd__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __iadd__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __sub__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rsub__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __isub__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __mul__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rmul__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __imul__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __div__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rdiv__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __idiv__(self, other: npt.ArrayLike, /) -> Self: ... 
+ + # + def __mod__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rmod__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __imod__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __divmod__(self, other: npt.ArrayLike, /) -> tuple[Incomplete, Incomplete]: ... + def __rdivmod__(self, other: npt.ArrayLike, /) -> tuple[Incomplete, Incomplete]: ... + + # + def __pow__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rpow__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __ipow__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __lshift__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.integer]]: ... + def __rlshift__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.integer]]: ... + def __ilshift__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + def __rshift__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.integer]]: ... + def __rrshift__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.integer]]: ... + def __irshift__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __and__(self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, /) -> container[Any, np.dtype[np.bool]]: ... + @overload + def __and__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.bool | np.integer]]: ... + __rand__ = __and__ + @overload + def __iand__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __iand__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __xor__(self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, /) -> container[Any, np.dtype[np.bool]]: ... + @overload + def __xor__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.bool | np.integer]]: ... + __rxor__ = __xor__ + @overload + def __ixor__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __ixor__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __or__(self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, /) -> container[Any, np.dtype[np.bool]]: ... + @overload + def __or__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.bool | np.integer]]: ... + __ror__ = __or__ + @overload + def __ior__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __ior__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __array__(self, /, t: None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __array__(self, /, t: _DTypeT) -> np.ndarray[_ShapeT_co, _DTypeT]: ... + + # + @overload + def __array_wrap__(self, arg0: npt.ArrayLike, /) -> container[_ShapeT_co, _DTypeT_co]: ... + @overload + def __array_wrap__(self, a: np.ndarray[_ShapeT, _DTypeT], c: Any = ..., s: Any = ..., /) -> container[_ShapeT, _DTypeT]: ... + + # + def copy(self, /) -> Self: ... + @deprecated("tostring() is deprecated. Use tobytes() instead.") + def tostring(self, /) -> bytes: ... + def tobytes(self, /) -> bytes: ... + def byteswap(self, /) -> Self: ... + def astype(self, /, typecode: _DTypeLike[_ScalarT]) -> container[_ShapeT_co, np.dtype[_ScalarT]]: ... 
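To make the stub's intent concrete: `container` wraps an ndarray in its `array` attribute and its operators re-wrap their results, which is why comparisons are annotated as returning a bool-dtype `container` rather than a plain `bool`. A hedged sketch, not part of the diff:

>>> from numpy.lib.user_array import container
>>> a = container([1.0, 2.0, 3.0])
>>> (a + a).array
array([2., 4., 6.])
>>> (a == a).array.dtype
dtype('bool')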
diff --git a/numpy/lib/_utils_impl.py b/numpy/lib/_utils_impl.py index 0c5d08ee7d9c..c2f0f31d7bfc 100644 --- a/numpy/lib/_utils_impl.py +++ b/numpy/lib/_utils_impl.py @@ -2,7 +2,6 @@ import sys import textwrap import types -import re import warnings import functools import platform @@ -178,7 +177,7 @@ def newfunc(*args, **kwds): skip += len(line) + 1 doc = doc[skip:] depdoc = textwrap.indent(depdoc, ' ' * indent) - doc = '\n\n'.join([depdoc, doc]) + doc = f'{depdoc}\n\n{doc}' newfunc.__doc__ = doc return newfunc diff --git a/numpy/lib/_utils_impl.pyi b/numpy/lib/_utils_impl.pyi index b1453874e85e..2a9eb76a5b38 100644 --- a/numpy/lib/_utils_impl.pyi +++ b/numpy/lib/_utils_impl.pyi @@ -1,33 +1,7 @@ -from typing import ( - Any, - TypeVar, - Protocol, -) +from _typeshed import SupportsWrite -from numpy._core.numerictypes import ( - issubdtype as issubdtype, -) - -_T_contra = TypeVar("_T_contra", contravariant=True) - -# A file-like object opened in `w` mode -class _SupportsWrite(Protocol[_T_contra]): - def write(self, s: _T_contra, /) -> Any: ... - -__all__: list[str] +__all__ = ["get_include", "info", "show_runtime"] def get_include() -> str: ... - -def info( - object: object = ..., - maxwidth: int = ..., - output: None | _SupportsWrite[str] = ..., - toplevel: str = ..., -) -> None: ... - -def source( - object: object, - output: None | _SupportsWrite[str] = ..., -) -> None: ... - def show_runtime() -> None: ... +def info(object: object = ..., maxwidth: int = ..., output: SupportsWrite[str] | None = ..., toplevel: str = ...) -> None: ... diff --git a/numpy/lib/_version.py b/numpy/lib/_version.py index 7dec3243b883..929f8a1c6685 100644 --- a/numpy/lib/_version.py +++ b/numpy/lib/_version.py @@ -31,8 +31,6 @@ class NumpyVersion: `NumpyVersion` instance. Note that all development versions of the same (pre-)release compare equal. - .. versionadded:: 1.9.0 - Parameters ---------- vstring : str @@ -52,6 +50,8 @@ class NumpyVersion: """ + __module__ = "numpy.lib" + def __init__(self, vstring): self.vstring = vstring ver_main = re.match(r'\d+\.\d+\.\d+', vstring) diff --git a/numpy/lib/_version.pyi b/numpy/lib/_version.pyi index 1c82c99b686e..c53ef795f926 100644 --- a/numpy/lib/_version.pyi +++ b/numpy/lib/_version.pyi @@ -1,4 +1,4 @@ -__all__: list[str] +__all__ = ["NumpyVersion"] class NumpyVersion: vstring: str diff --git a/numpy/lib/format.py b/numpy/lib/format.py index 8e14dfe4bcab..a22c096b246c 100644 --- a/numpy/lib/format.py +++ b/numpy/lib/format.py @@ -172,6 +172,7 @@ __all__ = [] +drop_metadata.__module__ = "numpy.lib.format" EXPECTED_KEYS = {'descr', 'fortran_order', 'shape'} MAGIC_PREFIX = b'\x93NUMPY' @@ -271,6 +272,8 @@ def dtype_to_descr(dtype): warnings.warn("metadata on a dtype is not saved to an npy/npz. " "Use another format (such as pickle) to store it.", UserWarning, stacklevel=2) + dtype = new_dtype + if dtype.names is not None: # This is a record array. The .descr is fine. XXX: parts of the # record array with an empty name, like padding bytes, still get @@ -481,8 +484,6 @@ def write_array_header_2_0(fp, d): """ Write the header for an array using the 2.0 format. The 2.0 format allows storing very large structured arrays. - .. versionadded:: 1.9.0 - Parameters ---------- fp : filelike object @@ -535,8 +536,6 @@ def read_array_header_2_0(fp, max_header_size=_MAX_HEADER_SIZE): This will leave the file object located just after the header. - .. 
versionadded:: 1.9.0 - Parameters ---------- fp : filelike object @@ -772,10 +771,6 @@ def read_array(fp, allow_pickle=False, pickle_kwargs=None, *, and time. allow_pickle : bool, optional Whether to allow writing pickled data. Default: False - - .. versionchanged:: 1.16.3 - Made default False in response to CVE-2019-6446. - pickle_kwargs : dict Additional keyword arguments to pass to pickle.load. These are only useful when loading object arrays saved on Python 2 when using diff --git a/numpy/lib/format.pyi b/numpy/lib/format.pyi index a4468f52f464..57c7e1e206e0 100644 --- a/numpy/lib/format.pyi +++ b/numpy/lib/format.pyi @@ -1,6 +1,6 @@ -from typing import Any, Literal, Final +from typing import Literal, Final -__all__: list[str] +__all__: list[str] = [] EXPECTED_KEYS: Final[set[str]] MAGIC_PREFIX: Final[bytes] diff --git a/numpy/lib/introspect.py b/numpy/lib/introspect.py index 70e638d4dde1..4826440dd410 100644 --- a/numpy/lib/introspect.py +++ b/numpy/lib/introspect.py @@ -83,11 +83,10 @@ def opt_func_info(func_name=None, signature=None): for k, v in matching_funcs.items(): matching_chars = {} for chars, targets in v.items(): - if any([ - sig_pattern.search(c) or - sig_pattern.search(dtype(c).name) + if any( + sig_pattern.search(c) or sig_pattern.search(dtype(c).name) for c in chars - ]): + ): matching_chars[chars] = targets if matching_chars: matching_sigs[k] = matching_chars diff --git a/numpy/lib/introspect.pyi b/numpy/lib/introspect.pyi new file mode 100644 index 000000000000..7929981cd636 --- /dev/null +++ b/numpy/lib/introspect.pyi @@ -0,0 +1,3 @@ +__all__ = ["opt_func_info"] + +def opt_func_info(func_name: str | None = None, signature: str | None = None) -> dict[str, dict[str, dict[str, str]]]: ... diff --git a/numpy/lib/mixins.py b/numpy/lib/mixins.py index a15bdeeac104..5e78ac0990b3 100644 --- a/numpy/lib/mixins.py +++ b/numpy/lib/mixins.py @@ -116,7 +116,7 @@ class that simply wraps a NumPy array and ensures that the result of any ... else: ... # one return value ... return type(self)(result) - ... + ... ... def __repr__(self): ... return '%s(%r)' % (type(self).__name__, self.value) @@ -137,7 +137,6 @@ class that simply wraps a NumPy array and ensures that the result of any with arbitrary, unrecognized types. This ensures that interactions with ArrayLike preserve a well-defined casting hierarchy. - .. 
versionadded:: 1.13 """ __slots__ = () # Like np.ndarray, this mixin class implements "Option 1" from the ufunc diff --git a/numpy/lib/mixins.pyi b/numpy/lib/mixins.pyi index dfabe3d89053..d13d0fe81df4 100644 --- a/numpy/lib/mixins.pyi +++ b/numpy/lib/mixins.pyi @@ -3,7 +3,7 @@ from typing import Literal as L, Any from numpy import ufunc -__all__: list[str] +__all__ = ["NDArrayOperatorsMixin"] # NOTE: `NDArrayOperatorsMixin` is not formally an abstract baseclass, # even though it's reliant on subclasses implementing `__array_ufunc__` diff --git a/numpy/lib/npyio.pyi b/numpy/lib/npyio.pyi index c3258e88d04f..fd3ae8f5a287 100644 --- a/numpy/lib/npyio.pyi +++ b/numpy/lib/npyio.pyi @@ -1,4 +1,5 @@ from numpy.lib._npyio_impl import ( DataSource as DataSource, NpzFile as NpzFile, + __doc__ as __doc__, ) diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py index ab16d1f9f1aa..8f4bae4f4721 100644 --- a/numpy/lib/recfunctions.py +++ b/numpy/lib/recfunctions.py @@ -6,17 +6,13 @@ """ import itertools + import numpy as np import numpy.ma as ma -from numpy import ndarray -from numpy.ma import MaskedArray -from numpy.ma.mrecords import MaskedRecords +import numpy.ma.mrecords as mrec from numpy._core.overrides import array_function_dispatch -from numpy._core.records import recarray from numpy.lib._iotools import _is_string_like -_check_fill_value = np.ma.core._check_fill_value - __all__ = [ 'append_fields', 'apply_along_fields', 'assign_fields_by_name', @@ -241,7 +237,7 @@ def get_fieldstructure(adtype, lastname=None, parents=None,): lastname : optional Last processed field name (used internally during recursion). parents : dictionary - Dictionary of parent fields (used interbally during recursion). + Dictionary of parent fields (used internally during recursion). Examples -------- @@ -267,7 +263,7 @@ def get_fieldstructure(adtype, lastname=None, parents=None,): parents[name] = [] parents.update(get_fieldstructure(current, name, parents)) else: - lastparent = [_ for _ in (parents.get(lastname, []) or [])] + lastparent = list((parents.get(lastname, []) or [])) if lastparent: lastparent.append(lastname) elif lastname: @@ -334,15 +330,15 @@ def _fix_output(output, usemask=True, asrecarray=False): Private function: return a recarray, a ndarray, a MaskedArray or a MaskedRecords depending on the input parameters """ - if not isinstance(output, MaskedArray): + if not isinstance(output, ma.MaskedArray): usemask = False if usemask: if asrecarray: - output = output.view(MaskedRecords) + output = output.view(mrec.MaskedRecords) else: output = ma.filled(output) if asrecarray: - output = output.view(recarray) + output = output.view(np.recarray) return output @@ -418,7 +414,7 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False, if (len(seqarrays) == 1): seqarrays = np.asanyarray(seqarrays[0]) # Do we have a single ndarray as input ? 
- if isinstance(seqarrays, (ndarray, np.void)): + if isinstance(seqarrays, (np.ndarray, np.void)): seqdtype = seqarrays.dtype # Make sure we have named fields if seqdtype.names is None: @@ -429,13 +425,13 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False, # Find what type of array we must return if usemask: if asrecarray: - seqtype = MaskedRecords + seqtype = mrec.MaskedRecords else: - seqtype = MaskedArray + seqtype = ma.MaskedArray elif asrecarray: - seqtype = recarray + seqtype = np.recarray else: - seqtype = ndarray + seqtype = np.ndarray return seqarrays.view(dtype=seqdtype, type=seqtype) else: seqarrays = (seqarrays,) @@ -459,8 +455,8 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False, mask = ma.getmaskarray(a).ravel() # Get the filling value (if needed) if nbmissing: - fval = _check_fill_value(fill_value, a.dtype) - if isinstance(fval, (ndarray, np.void)): + fval = mrec._check_fill_value(fill_value, a.dtype) + if isinstance(fval, (np.ndarray, np.void)): if len(fval.dtype) == 1: fval = fval.item()[0] fmsk = True @@ -478,15 +474,15 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False, output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength), mask=list(_izip_records(seqmask, flatten=flatten))) if asrecarray: - output = output.view(MaskedRecords) + output = output.view(mrec.MaskedRecords) else: # Same as before, without the mask we don't need... for (a, n) in zip(seqarrays, sizes): nbmissing = (maxlength - n) data = a.ravel().__array__() if nbmissing: - fval = _check_fill_value(fill_value, a.dtype) - if isinstance(fval, (ndarray, np.void)): + fval = mrec._check_fill_value(fill_value, a.dtype) + if isinstance(fval, (np.ndarray, np.void)): if len(fval.dtype) == 1: fval = fval.item()[0] else: @@ -497,7 +493,7 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False, output = np.fromiter(tuple(_izip_records(seqdata, flatten=flatten)), dtype=newdtype, count=maxlength) if asrecarray: - output = output.view(recarray) + output = output.view(np.recarray) # And we're done... return output @@ -513,10 +509,6 @@ def drop_fields(base, drop_names, usemask=True, asrecarray=False): Nested fields are supported. - .. versionchanged:: 1.18.0 - `drop_fields` returns an array with 0 fields if all fields are dropped, - rather than returning ``None`` as it did previously. - Parameters ---------- base : array @@ -1212,7 +1204,7 @@ def apply_along_fields(func, arr): Returns ------- out : ndarray - Result of the recution operation + Result of the reduction operation Examples -------- @@ -1371,7 +1363,7 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False, dtype=[('A', 'S3'), ('B', ' _VoidArrayT: ... + +# +def get_names(adtype: np.dtype[np.void]) -> _NestedNames: ... +def get_names_flat(adtype: np.dtype[np.void]) -> tuple[str, ...]: ... + +# +@overload +def flatten_descr(ndtype: _NonVoidDTypeT) -> tuple[tuple[Literal[""], _NonVoidDTypeT]]: ... +@overload +def flatten_descr(ndtype: np.dtype[np.void]) -> tuple[tuple[str, np.dtype[Any]]]: ... + +# +def get_fieldstructure( + adtype: np.dtype[np.void], + lastname: str | None = None, + parents: dict[str, list[str]] | None = None, +) -> dict[str, list[str]]: ... + +# +@overload +def merge_arrays( + seqarrays: Sequence[np.ndarray[_ShapeT, np.dtype[Any]]] | np.ndarray[_ShapeT, np.dtype[Any]], + fill_value: float = -1, + flatten: bool = False, + usemask: bool = False, + asrecarray: bool = False, +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... 
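For orientation, a runtime sketch of `merge_arrays` (illustrative only; field names f0, f1, ... are auto-generated, and the '<i8' in the printed dtype assumes a 64-bit platform):

>>> import numpy as np
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20.])))
array([(1, 10.), (2, 20.)], dtype=[('f0', '<i8'), ('f1', '<f8')])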
+@overload +def merge_arrays( + seqarrays: Sequence[npt.ArrayLike] | np.void, + fill_value: float = -1, + flatten: bool = False, + usemask: bool = False, + asrecarray: bool = False, +) -> np.recarray[Any, np.dtype[np.void]]: ... + +# +@overload +def drop_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], + usemask: bool = True, + asrecarray: Literal[False] = False, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def drop_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], + usemask: bool, + asrecarray: Literal[True], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def drop_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], + usemask: bool = True, + *, + asrecarray: Literal[True], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... + +# +@overload +def rename_fields( + base: MaskedRecords[_ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... +@overload +def rename_fields( + base: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +@overload +def rename_fields( + base: np.recarray[_ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def rename_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... + +# +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype[Any]] | None, + fill_value: int, + usemask: Literal[False], + asrecarray: Literal[False] = False, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype[Any]] | None = None, + fill_value: int = -1, + *, + usemask: Literal[False], + asrecarray: Literal[False] = False, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype[Any]] | None, + fill_value: int, + usemask: Literal[False], + asrecarray: Literal[True], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype[Any]] | None = None, + fill_value: int = -1, + *, + usemask: Literal[False], + asrecarray: Literal[True], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype[Any]] | None = None, + fill_value: int = -1, + usemask: Literal[True] = True, + asrecarray: Literal[False] = False, +) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... 
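`append_fields` defaults to `usemask=True`, hence the masked-array return in the overload above; with `usemask=False` it produces the plain structured ndarray typed earlier. A hedged sketch:

>>> import numpy as np
>>> from numpy.lib import recfunctions as rfn
>>> base = np.array([(1, 10.0), (2, 20.0)], dtype=[('a', int), ('b', float)])
>>> rfn.append_fields(base, 'c', [100, 200], usemask=False).dtype.names
('a', 'b', 'c')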
+@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype[Any]] | None, + fill_value: int, + usemask: Literal[True], + asrecarray: Literal[True], +) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype[Any]] | None = None, + fill_value: int = -1, + usemask: Literal[True] = True, + *, + asrecarray: Literal[True], +) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... + +# +def rec_drop_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... + +# +def rec_append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype[Any]] | None = None, +) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... + +# TODO(jorenham): Stop passing `void` directly once structured dtypes are implemented, +# e.g. using a `TypeVar` with constraints. +# https://github.com/numpy/numtype/issues/92 +@overload +def repack_fields(a: _DTypeT, align: bool = False, recurse: bool = False) -> _DTypeT: ... +@overload +def repack_fields(a: _ScalarT, align: bool = False, recurse: bool = False) -> _ScalarT: ... +@overload +def repack_fields(a: _ArrayT, align: bool = False, recurse: bool = False) -> _ArrayT: ... + +# TODO(jorenham): Attempt shape-typing (return type has ndim == arr.ndim + 1) +@overload +def structured_to_unstructured( + arr: npt.NDArray[np.void], + dtype: _DTypeLike[_ScalarT], + copy: bool = False, + casting: np._CastingKind = "unsafe", +) -> npt.NDArray[_ScalarT]: ... +@overload +def structured_to_unstructured( + arr: npt.NDArray[np.void], + dtype: npt.DTypeLike | None = None, + copy: bool = False, + casting: np._CastingKind = "unsafe", +) -> npt.NDArray[Any]: ... + +# +@overload +def unstructured_to_structured( + arr: npt.NDArray[Any], + dtype: npt.DTypeLike, + names: None = None, + align: bool = False, + copy: bool = False, + casting: str = "unsafe", +) -> npt.NDArray[np.void]: ... +@overload +def unstructured_to_structured( + arr: npt.NDArray[Any], + dtype: None, + names: _OneOrMany[str], + align: bool = False, + copy: bool = False, + casting: str = "unsafe", +) -> npt.NDArray[np.void]: ... + +# +def apply_along_fields( + func: Callable[[np.ndarray[_ShapeT, Any]], npt.NDArray[Any]], + arr: np.ndarray[_ShapeT, np.dtype[np.void]], +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... + +# +def assign_fields_by_name(dst: npt.NDArray[np.void], src: npt.NDArray[np.void], zero_unassigned: bool = True) -> None: ... + +# +def require_fields( + array: np.ndarray[_ShapeT, np.dtype[np.void]], + required_dtype: _DTypeLikeVoid, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... + +# TODO(jorenham): Attempt shape-typing +@overload +def stack_arrays( + arrays: _ArrayT, + defaults: Mapping[str, object] | None = None, + usemask: bool = True, + asrecarray: bool = False, + autoconvert: bool = False, +) -> _ArrayT: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None, + usemask: Literal[False], + asrecarray: Literal[False] = False, + autoconvert: bool = False, +) -> npt.NDArray[np.void]: ... 
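`stack_arrays` concatenates structured arrays field by field; a single input array is passed through unchanged (the `_ArrayT` overload above), and `usemask` selects between a masked and a plain result. An illustrative sketch:

>>> import numpy as np
>>> from numpy.lib import recfunctions as rfn
>>> z = np.array([(1, 10.0)], dtype=[('A', int), ('B', float)])
>>> zz = np.array([(2, 20.0), (3, 30.0)], dtype=[('A', int), ('B', float)])
>>> res = rfn.stack_arrays((z, zz), usemask=False)
>>> res.shape, res.dtype.names
((3,), ('A', 'B'))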
+@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[False] = False, + autoconvert: bool = False, +) -> npt.NDArray[np.void]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[True], + autoconvert: bool = False, +) -> np.recarray[tuple[int, ...], np.dtype[np.void]]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + usemask: Literal[True] = True, + asrecarray: Literal[False] = False, + autoconvert: bool = False, +) -> np.ma.MaskedArray[tuple[int, ...], np.dtype[np.void]]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None, + usemask: Literal[True], + asrecarray: Literal[True], + autoconvert: bool = False, +) -> MaskedRecords[tuple[int, ...], np.dtype[np.void]]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + usemask: Literal[True] = True, + *, + asrecarray: Literal[True], + autoconvert: bool = False, +) -> MaskedRecords[tuple[int, ...], np.dtype[np.void]]: ... + +# +@overload +def find_duplicates( + a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], + key: str | None = None, + ignoremask: bool = True, + return_index: Literal[False] = False, +) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +@overload +def find_duplicates( + a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], + key: str | None, + ignoremask: bool, + return_index: Literal[True], +) -> tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ... +@overload +def find_duplicates( + a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], + key: str | None = None, + ignoremask: bool = True, + *, + return_index: Literal[True], +) -> tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ... + +# +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[False] = False, +) -> np.ndarray[tuple[int], np.dtype[np.void]]: ... +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[True], +) -> np.recarray[tuple[int], np.dtype[np.void]]: ... +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + usemask: Literal[True] = True, + asrecarray: Literal[False] = False, +) -> np.ma.MaskedArray[tuple[int], np.dtype[np.void]]: ... 
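`join_by` performs a relational join on the key field(s) and, like the other helpers, defaults to `usemask=True`, which these overloads narrow. A hedged runtime sketch (an inner join keeps only keys present in both inputs):

>>> import numpy as np
>>> from numpy.lib import recfunctions as rfn
>>> r1 = np.array([(1, 10.0), (2, 20.0)], dtype=[('key', int), ('a', float)])
>>> r2 = np.array([(2, 200.0), (3, 300.0)], dtype=[('key', int), ('b', float)])
>>> out = rfn.join_by('key', r1, r2, usemask=False)
>>> out.dtype.names, len(out)
(('key', 'a', 'b'), 1)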
+@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + usemask: Literal[True] = True, + *, + asrecarray: Literal[True], +) -> MaskedRecords[tuple[int], np.dtype[np.void]]: ... + +# +def rec_join( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, +) -> np.recarray[tuple[int], np.dtype[np.void]]: ... diff --git a/numpy/lib/scimath.pyi b/numpy/lib/scimath.pyi index a149cdc34644..cff5b9097fae 100644 --- a/numpy/lib/scimath.pyi +++ b/numpy/lib/scimath.pyi @@ -1,12 +1,12 @@ from ._scimath_impl import ( __all__ as __all__, - sqrt as sqrt, - log as log, - log2 as log2, - logn as logn, - log10 as log10, - power as power, - arccos as arccos, - arcsin as arcsin, + sqrt as sqrt, + log as log, + log2 as log2, + logn as logn, + log10 as log10, + power as power, + arccos as arccos, + arcsin as arcsin, arctanh as arctanh, ) diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py index ef3319e901a0..6c1247db8e0c 100644 --- a/numpy/lib/tests/test_arraypad.py +++ b/numpy/lib/tests/test_arraypad.py @@ -867,12 +867,12 @@ def test_check_03(self): a = np.pad([1, 2, 3], 4, 'reflect') b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3]) assert_array_equal(a, b) - + def test_check_04(self): a = np.pad([1, 2, 3], [1, 10], 'reflect') b = np.array([2, 1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3, 2, 1]) assert_array_equal(a, b) - + def test_check_05(self): a = np.pad([1, 2, 3, 4], [45, 10], 'reflect') b = np.array( @@ -883,12 +883,12 @@ def test_check_05(self): 2, 3, 4, 3, 2, 1, 2, 3, 4, 3, 2, 1, 2, 3, 4, 3, 2, 1, 2]) assert_array_equal(a, b) - + def test_check_06(self): a = np.pad([1, 2, 3, 4], [15, 2], 'symmetric') b = np.array( - [2, 3, 4, 4, 3, 2, 1, 1, 2, 3, - 4, 4, 3, 2, 1, 1, 2, 3, 4, 4, + [2, 3, 4, 4, 3, 2, 1, 1, 2, 3, + 4, 4, 3, 2, 1, 1, 2, 3, 4, 4, 3] ) assert_array_equal(a, b) @@ -896,11 +896,11 @@ def test_check_06(self): def test_check_07(self): a = np.pad([1, 2, 3, 4, 5, 6], [45, 3], 'symmetric') b = np.array( - [4, 5, 6, 6, 5, 4, 3, 2, 1, 1, - 2, 3, 4, 5, 6, 6, 5, 4, 3, 2, - 1, 1, 2, 3, 4, 5, 6, 6, 5, 4, - 3, 2, 1, 1, 2, 3, 4, 5, 6, 6, - 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, + [4, 5, 6, 6, 5, 4, 3, 2, 1, 1, + 2, 3, 4, 5, 6, 6, 5, 4, 3, 2, + 1, 1, 2, 3, 4, 5, 6, 6, 5, 4, + 3, 2, 1, 1, 2, 3, 4, 5, 6, 6, + 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, 6, 6, 5, 4]) assert_array_equal(a, b) @@ -1175,7 +1175,7 @@ def test_repeated_wrapping(self): a = np.arange(5) b = np.pad(a, (0, 12), mode="wrap") assert_array_equal(np.r_[a, a, a, a][:-3], b) - + def test_repeated_wrapping_multiple_origin(self): """ Assert that 'wrap' pads only with multiples of the original area if diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index b613fa3e736d..d9721266036d 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -270,7 +270,7 @@ def assert_isin_equal(a, b): assert_isin_equal(empty_array, empty_array) @pytest.mark.parametrize("kind", [None, "sort", "table"]) - def test_isin(self, kind): + def test_isin_additional(self, kind): # we use two different sizes for the b array here to test the # two different paths in isin(). 
for mult in (1, 10): diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index 31352864b7e2..f237dffbc244 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -685,8 +685,8 @@ def test_version_2_0_memmap(tmpdir): # requires more than 2 byte for header dt = [(("%d" % i) * 100, float) for i in range(500)] d = np.ones(1000, dtype=dt) - tf1 = os.path.join(tmpdir, f'version2_01.npy') - tf2 = os.path.join(tmpdir, f'version2_02.npy') + tf1 = os.path.join(tmpdir, 'version2_01.npy') + tf2 = os.path.join(tmpdir, 'version2_02.npy') # 1.0 requested but data cannot be saved this way assert_raises(ValueError, format.open_memmap, tf1, mode='w+', dtype=d.dtype, @@ -713,12 +713,12 @@ def test_version_2_0_memmap(tmpdir): @pytest.mark.parametrize("mmap_mode", ["r", None]) def test_huge_header(tmpdir, mmap_mode): - f = os.path.join(tmpdir, f'large_header.npy') + f = os.path.join(tmpdir, 'large_header.npy') arr = np.array(1, dtype="i,"*10000+"i") with pytest.warns(UserWarning, match=".*format 2.0"): np.save(f, arr) - + with pytest.raises(ValueError, match="Header.*large"): np.load(f, mmap_mode=mmap_mode) @@ -732,12 +732,12 @@ def test_huge_header(tmpdir, mmap_mode): assert_array_equal(res, arr) def test_huge_header_npz(tmpdir): - f = os.path.join(tmpdir, f'large_header.npz') + f = os.path.join(tmpdir, 'large_header.npz') arr = np.array(1, dtype="i,"*10000+"i") with pytest.warns(UserWarning, match=".*format 2.0"): np.savez(f, arr=arr) - + # Only getting the array from the file actually reads it with pytest.raises(ValueError, match="Header.*large"): np.load(f)["arr"] @@ -998,32 +998,30 @@ def test_header_growth_axis(): assert len(fp.getvalue()) == expected_header_length -@pytest.mark.parametrize('dt, fail', [ - (np.dtype({'names': ['a', 'b'], 'formats': [float, np.dtype('S3', - metadata={'some': 'stuff'})]}), True), - (np.dtype(int, metadata={'some': 'stuff'}), False), - (np.dtype([('subarray', (int, (2,)))], metadata={'some': 'stuff'}), False), +@pytest.mark.parametrize('dt', [ + np.dtype({'names': ['a', 'b'], 'formats': [float, np.dtype('S3', + metadata={'some': 'stuff'})]}), + np.dtype(int, metadata={'some': 'stuff'}), + np.dtype([('subarray', (int, (2,)))], metadata={'some': 'stuff'}), # recursive: metadata on the field of a dtype - (np.dtype({'names': ['a', 'b'], 'formats': [ + np.dtype({'names': ['a', 'b'], 'formats': [ float, np.dtype({'names': ['c'], 'formats': [np.dtype(int, metadata={})]}) - ]}), False) + ]}), ]) @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), reason="PyPy bug in error formatting") -def test_metadata_dtype(dt, fail): +def test_metadata_dtype(dt): # gh-14142 arr = np.ones(10, dtype=dt) buf = BytesIO() with assert_warns(UserWarning): np.save(buf, arr) buf.seek(0) - if fail: - with assert_raises(ValueError): - np.load(buf) - else: - arr2 = np.load(buf) - # BUG: assert_array_equal does not check metadata - from numpy.lib._utils_impl import drop_metadata - assert_array_equal(arr, arr2) - assert drop_metadata(arr.dtype) is not arr.dtype - assert drop_metadata(arr2.dtype) is arr2.dtype + + # Loading should work (metadata was stripped): + arr2 = np.load(buf) + # BUG: assert_array_equal does not check metadata + from numpy.lib._utils_impl import drop_metadata + assert_array_equal(arr, arr2) + assert drop_metadata(arr.dtype) is not arr.dtype + assert drop_metadata(arr2.dtype) is arr2.dtype diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index bc3ce6409f1c..ed59a4a86181 
100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -557,13 +557,9 @@ def test_basic(self): b = np.array([0, 1], dtype=np.float64) assert_equal(insert(b, 0, b[0]), [0., 0., 1.]) assert_equal(insert(b, [], []), b) - # Bools will be treated differently in the future: - # assert_equal(insert(a, np.array([True]*4), 9), [9, 1, 9, 2, 9, 3, 9]) - with warnings.catch_warnings(record=True) as w: - warnings.filterwarnings('always', '', FutureWarning) - assert_equal( - insert(a, np.array([True] * 4), 9), [1, 9, 9, 9, 9, 2, 3]) - assert_(w[0].category is FutureWarning) + assert_equal(insert(a, np.array([True]*4), 9), [9, 1, 9, 2, 9, 3, 9]) + assert_equal(insert(a, np.array([True, False, True, False]), 9), + [9, 1, 2, 9, 3]) def test_multidim(self): a = [[1, 1, 1]] @@ -1397,6 +1393,34 @@ def test_list_to_list(self): res = trim_zeros(self.a.tolist()) assert isinstance(res, list) + @pytest.mark.parametrize("ndim", (0, 1, 2, 3, 10)) + def test_nd_basic(self, ndim): + a = np.ones((2,) * ndim) + b = np.pad(a, (2, 1), mode="constant", constant_values=0) + res = trim_zeros(b, axis=None) + assert_array_equal(a, res) + + @pytest.mark.parametrize("ndim", (0, 1, 2, 3)) + def test_allzero(self, ndim): + a = np.zeros((3,) * ndim) + res = trim_zeros(a, axis=None) + assert_array_equal(res, np.zeros((0,) * ndim)) + + def test_trim_arg(self): + a = np.array([0, 1, 2, 0]) + + res = trim_zeros(a, trim='f') + assert_array_equal(res, [1, 2, 0]) + + res = trim_zeros(a, trim='b') + assert_array_equal(res, [0, 1, 2]) + + @pytest.mark.parametrize("trim", ("front", "")) + def test_unexpected_trim_value(self, trim): + arr = self.a + with pytest.raises(ValueError, match=r"unexpected character\(s\) in `trim`"): + trim_zeros(arr, trim=trim) + class TestExtins: @@ -1563,7 +1587,7 @@ def test_keywords_no_func_code(self): try: vectorize(random.randrange) # Should succeed except Exception: - raise AssertionError() + raise AssertionError def test_keywords2_ticket_2100(self): # Test kwarg support: enhancement ticket 2100 @@ -1935,7 +1959,7 @@ def test_positional_regression_9477(self): def test_datetime_conversion(self): otype = "datetime64[ns]" - arr = np.array(['2024-01-01', '2024-01-02', '2024-01-03'], + arr = np.array(['2024-01-01', '2024-01-02', '2024-01-03'], dtype='datetime64[ns]') assert_array_equal(np.vectorize(lambda x: x, signature="(i)->(j)", otypes=[otype])(arr), arr) @@ -2513,6 +2537,12 @@ def test_cov_dtype(self, test_type): res = cov(cast_x1, dtype=test_type) assert test_type == res.dtype + def test_gh_27658(self): + x = np.ones((3, 1)) + expected = np.cov(x, ddof=0, rowvar=True) + actual = np.cov(x.T, ddof=0, rowvar=False) + assert_allclose(actual, expected, strict=True) + class Test_I0: @@ -3193,8 +3223,6 @@ def test_linear_interpolation(self, input_dtype, expected_dtype): expected_dtype = np.dtype(expected_dtype) - if np._get_promotion_state() == "legacy": - expected_dtype = np.promote_types(expected_dtype, np.float64) arr = np.asarray([15.0, 20.0, 35.0, 40.0, 50.0], dtype=input_dtype) weights = np.ones_like(arr) if weighted else None @@ -4010,6 +4038,17 @@ def test_quantile_with_weights_and_axis(self, method): ) assert_allclose(q, q_res) + @pytest.mark.parametrize("method", methods_supporting_weights) + def test_quantile_weights_min_max(self, method): + # Test weighted quantile at 0 and 1 with leading and trailing zero + # weights. 
+ w = [0, 0, 1, 2, 3, 0] + y = np.arange(6) + y_min = np.quantile(y, 0, weights=w, method="inverted_cdf") + y_max = np.quantile(y, 1, weights=w, method="inverted_cdf") + assert y_min == y[2] # == 2 + assert y_max == y[4] # == 4 + def test_quantile_weights_raises_negative_weights(self): y = [1, 2] w = [-0.5, 1] diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py index 09a1a5ab709d..4b300624cac7 100644 --- a/numpy/lib/tests/test_histograms.py +++ b/numpy/lib/tests/test_histograms.py @@ -6,7 +6,6 @@ assert_array_almost_equal, assert_raises, assert_allclose, assert_array_max_ulp, assert_raises_regex, suppress_warnings, ) -from numpy.testing._private.utils import requires_memory import pytest @@ -270,7 +269,7 @@ def test_object_array_of_0d(self): histogram, [np.array(0.4) for i in range(10)] + [np.inf]) # these should not crash - np.histogram([np.array(0.5) for i in range(10)] + [.500000000000001]) + np.histogram([np.array(0.5) for i in range(10)] + [.500000000000002]) np.histogram([np.array(0.5) for i in range(10)] + [.5]) def test_some_nan_values(self): @@ -395,6 +394,11 @@ def test_histogram_bin_edges(self): edges = histogram_bin_edges(arr, bins='auto', range=(0, 1)) assert_array_equal(edges, e) + def test_small_value_range(self): + arr = np.array([1, 1 + 2e-16] * 10) + with pytest.raises(ValueError, match="Too many bins for data range"): + histogram(arr, bins=10) + # @requires_memory(free_bytes=1e10) # @pytest.mark.slow @pytest.mark.skip(reason="Bad memory reports lead to OOM in ci testing") diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 38ded1f26cda..742915e22ef0 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -2790,12 +2790,32 @@ def test_load_refcount(): x = np.loadtxt(TextIO("0 1 2 3"), dtype=dt) assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt)) + def test_load_multiple_arrays_until_eof(): f = BytesIO() np.save(f, 1) np.save(f, 2) f.seek(0) - assert np.load(f) == 1 - assert np.load(f) == 2 + out1 = np.load(f) + assert out1 == 1 + out2 = np.load(f) + assert out2 == 2 with pytest.raises(EOFError): np.load(f) + + +def test_savez_nopickle(): + obj_array = np.array([1, 'hello'], dtype=object) + with temppath(suffix='.npz') as tmp: + np.savez(tmp, obj_array) + + with temppath(suffix='.npz') as tmp: + with pytest.raises(ValueError, match="Object arrays cannot be saved when.*"): + np.savez(tmp, obj_array, allow_pickle=False) + + with temppath(suffix='.npz') as tmp: + np.savez_compressed(tmp, obj_array) + + with temppath(suffix='.npz') as tmp: + with pytest.raises(ValueError, match="Object arrays cannot be saved when.*"): + np.savez_compressed(tmp, obj_array, allow_pickle=False) diff --git a/numpy/lib/tests/test_loadtxt.py b/numpy/lib/tests/test_loadtxt.py index aba00c4256ad..116cd1608da3 100644 --- a/numpy/lib/tests/test_loadtxt.py +++ b/numpy/lib/tests/test_loadtxt.py @@ -18,12 +18,12 @@ def test_scientific_notation(): """Test that both 'e' and 'E' are parsed correctly.""" data = StringIO( - + "1.0e-1,2.0E1,3.0\n" "4.0e-2,5.0E-1,6.0\n" "7.0e-3,8.0E1,9.0\n" "0.0e-4,1.0E-1,2.0" - + ) expected = np.array( [[0.1, 20., 3.0], [0.04, 0.5, 6], [0.007, 80., 9], [0, 0.1, 2]] @@ -42,18 +42,18 @@ def test_comment_multiple_chars(comment): @pytest.fixture def mixed_types_structured(): """ - Fixture providing hetergeneous input data with a structured dtype, along + Fixture providing heterogeneous input data with a structured dtype, along with the associated structured array. 
""" data = StringIO( - + "1000;2.4;alpha;-34\n" "2000;3.1;beta;29\n" "3500;9.9;gamma;120\n" "4090;8.1;delta;0\n" "5001;4.4;epsilon;-99\n" "6543;7.8;omega;-1\n" - + ) dtype = np.dtype( [('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)] @@ -597,14 +597,14 @@ def test_comment_multichar_error_with_quote(): def test_structured_dtype_with_quotes(): data = StringIO( - + "1000;2.4;'alpha';-34\n" "2000;3.1;'beta';29\n" "3500;9.9;'gamma';120\n" "4090;8.1;'delta';0\n" "5001;4.4;'epsilon';-99\n" "6543;7.8;'omega';-1\n" - + ) dtype = np.dtype( [('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)] diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index 2a92cad2f315..c8fa7df86b24 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -142,7 +142,7 @@ def test_result_values(self): ], ids=["0d", "2d"]) def test_allnans(self, axis, dtype, array): if axis is not None and array.ndim == 0: - pytest.skip(f"`axis != None` not supported for 0d arrays") + pytest.skip("`axis != None` not supported for 0d arrays") array = array.astype(dtype) match = "All-NaN slice encountered" @@ -294,7 +294,7 @@ def test_result_values(self): ], ids=["0d", "2d"]) def test_allnans(self, axis, dtype, array): if axis is not None and array.ndim == 0: - pytest.skip(f"`axis != None` not supported for 0d arrays") + pytest.skip("`axis != None` not supported for 0d arrays") array = array.astype(dtype) for func in self.nanfuncs: @@ -575,7 +575,7 @@ class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin): ], ids=["0d", "2d"]) def test_allnans(self, axis, dtype, array): if axis is not None and array.ndim == 0: - pytest.skip(f"`axis != None` not supported for 0d arrays") + pytest.skip("`axis != None` not supported for 0d arrays") array = array.astype(dtype) for func, identity in zip(self.nanfuncs, [0, 1]): @@ -634,7 +634,7 @@ class TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin): ], ids=["0d", "2d"]) def test_allnans(self, axis, dtype, array): if axis is not None and array.ndim == 0: - pytest.skip(f"`axis != None` not supported for 0d arrays") + pytest.skip("`axis != None` not supported for 0d arrays") array = array.astype(dtype) for func, identity in zip(self.nanfuncs, [0, 1]): @@ -744,7 +744,7 @@ def test_ddof_too_big(self): ], ids=["0d", "2d"]) def test_allnans(self, axis, dtype, array): if axis is not None and array.ndim == 0: - pytest.skip(f"`axis != None` not supported for 0d arrays") + pytest.skip("`axis != None` not supported for 0d arrays") array = array.astype(dtype) match = "(Degrees of freedom <= 0 for slice.)|(Mean of empty slice)" @@ -1181,7 +1181,7 @@ def gen_weights(d): ], ids=["0d", "2d"]) def test_allnans(self, axis, dtype, array): if axis is not None and array.ndim == 0: - pytest.skip(f"`axis != None` not supported for 0d arrays") + pytest.skip("`axis != None` not supported for 0d arrays") array = array.astype(dtype) with pytest.warns(RuntimeWarning, match="All-NaN slice encountered"): @@ -1356,7 +1356,7 @@ def test_no_p_overwrite(self): ], ids=["0d", "2d"]) def test_allnans(self, axis, dtype, array): if axis is not None and array.ndim == 0: - pytest.skip(f"`axis != None` not supported for 0d arrays") + pytest.skip("`axis != None` not supported for 0d arrays") array = array.astype(dtype) with pytest.warns(RuntimeWarning, match="All-NaN slice encountered"): diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py index 5fface63c7d5..460de9985fa0 100644 --- 
a/numpy/lib/tests/test_polynomial.py +++ b/numpy/lib/tests/test_polynomial.py @@ -265,8 +265,8 @@ def test_zero_poly_dtype(self): def test_poly_eq(self): p = np.poly1d([1, 2, 3]) p2 = np.poly1d([1, 2, 4]) - assert_equal(p == None, False) - assert_equal(p != None, True) + assert_equal(p == None, False) # noqa: E711 + assert_equal(p != None, True) # noqa: E711 assert_equal(p == p, True) assert_equal(p == p2, False) assert_equal(p != p2, True) diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py index 98860dfdab77..37ab6d390ac8 100644 --- a/numpy/lib/tests/test_recfunctions.py +++ b/numpy/lib/tests/test_recfunctions.py @@ -1,4 +1,3 @@ -import pytest import numpy as np import numpy.ma as ma diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py index 5b777f5735e4..d68cd7d6dcca 100644 --- a/numpy/lib/tests/test_regression.py +++ b/numpy/lib/tests/test_regression.py @@ -5,7 +5,6 @@ assert_, assert_equal, assert_array_equal, assert_array_almost_equal, assert_raises, _assert_valid_refcount, ) -import pytest class TestRegression: @@ -181,7 +180,7 @@ def test_append_fields_dtype_list(self): try: append_fields(base, names, data, dlist) except Exception: - raise AssertionError() + raise AssertionError def test_loadtxt_fields_subarrays(self): # For ticket #1936 @@ -210,7 +209,7 @@ def test_nansum_with_boolean(self): try: np.nansum(a) except Exception: - raise AssertionError() + raise AssertionError def test_py3_compat(self): # gh-2561 @@ -223,6 +222,6 @@ class C: try: np.info(C(), output=out) except AttributeError: - raise AssertionError() + raise AssertionError finally: out.close() diff --git a/numpy/lib/tests/test_type_check.py b/numpy/lib/tests/test_type_check.py index e8e11c4257c3..01c888bef6f1 100644 --- a/numpy/lib/tests/test_type_check.py +++ b/numpy/lib/tests/test_type_check.py @@ -356,10 +356,10 @@ def test_generic(self): assert_(vals[1] == 0) assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2])) assert_equal(type(vals), np.ndarray) - + # perform the same tests but with nan, posinf and neginf keywords with np.errstate(divide='ignore', invalid='ignore'): - vals = nan_to_num(np.array((-1., 0, 1))/0., + vals = nan_to_num(np.array((-1., 0, 1))/0., nan=10, posinf=20, neginf=30) assert_equal(vals, [30, 10, 20]) assert_all(np.isfinite(vals[[0, 2]])) @@ -375,7 +375,7 @@ def test_generic(self): assert_(vals[1] == 0) assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2])) assert_equal(type(vals), np.ndarray) - + # perform the same test but in-place with np.errstate(divide='ignore', invalid='ignore'): vals = np.array((-1., 0, 1))/0. @@ -440,9 +440,9 @@ def test_complex_bad2(self): # !! inf. Comment out for now, and see if it # !! changes #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals)) - + def test_do_not_rewrite_previous_keyword(self): - # This is done to test that when, for instance, nan=np.inf then these + # This is done to test that when, for instance, nan=np.inf then these # values are not rewritten by posinf keyword to the posinf value. 
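
The invariant this test pins down: a replacement injected by one keyword is not re-substituted by a later one. A small sketch of the asserted behavior, assuming current `nan_to_num` semantics:

    >>> import numpy as np
    >>> out = np.nan_to_num(np.array([-np.inf, np.nan, np.inf]),
    ...                     nan=np.inf, posinf=999)
    >>> bool(np.isinf(out[1]))   # the inf substituted for nan survives
    True
    >>> float(out[2])            # only the original +inf becomes posinf
    999.0
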
with np.errstate(divide='ignore', invalid='ignore'): vals = nan_to_num(np.array((-1., 0, 1))/0., nan=np.inf, posinf=999) diff --git a/numpy/lib/tests/test_utils.py b/numpy/lib/tests/test_utils.py index e2f72ac90c92..644912d941e3 100644 --- a/numpy/lib/tests/test_utils.py +++ b/numpy/lib/tests/test_utils.py @@ -43,7 +43,7 @@ def _compare_dtypes(dt1, dt2): assert dt_m.metadata is None assert dt_m['l1'].metadata is None assert dt_m['l1']['l2'].metadata is None - + # alignment dt = np.dtype([('x', '>> LA.vector_norm(b, ord=-np.inf) 1.0 + >>> LA.vector_norm(b, ord=0) + 9.0 >>> LA.vector_norm(b, ord=1) 45.0 >>> LA.vector_norm(b, ord=-1) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 0d431794b74d..d3ca3eb701b7 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -8,11 +8,14 @@ from typing import ( SupportsIndex, SupportsInt, NamedTuple, - Generic, ) import numpy as np from numpy import ( + # re-exports + vecdot, + + # other generic, floating, complexfloating, @@ -24,12 +27,13 @@ from numpy import ( float64, complex128, ) - -from numpy.linalg import LinAlgError as LinAlgError - +from numpy.linalg import LinAlgError +from numpy._core.fromnumeric import matrix_transpose +from numpy._core.numeric import tensordot from numpy._typing import ( NDArray, ArrayLike, + DTypeLike, _ArrayLikeUnknown, _ArrayLikeBool_co, _ArrayLikeInt_co, @@ -38,18 +42,50 @@ from numpy._typing import ( _ArrayLikeComplex_co, _ArrayLikeTD64_co, _ArrayLikeObject_co, - DTypeLike, ) +__all__ = [ + "matrix_power", + "solve", + "tensorsolve", + "tensorinv", + "inv", + "cholesky", + "eigvals", + "eigvalsh", + "pinv", + "slogdet", + "det", + "svd", + "svdvals", + "eig", + "eigh", + "lstsq", + "norm", + "qr", + "cond", + "matrix_rank", + "LinAlgError", + "multi_dot", + "trace", + "diagonal", + "cross", + "outer", + "tensordot", + "matmul", + "matrix_transpose", + "matrix_norm", + "vector_norm", + "vecdot", +] + _T = TypeVar("_T") _ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) -_SCT = TypeVar("_SCT", bound=generic, covariant=True) _SCT2 = TypeVar("_SCT2", bound=generic, covariant=True) _2Tuple: TypeAlias = tuple[_T, _T] _ModeKind: TypeAlias = L["reduced", "complete", "r", "raw"] -__all__: list[str] class EigResult(NamedTuple): eigenvalues: NDArray[Any] diff --git a/numpy/linalg/lapack_lite/clapack_scrub.py b/numpy/linalg/lapack_lite/clapack_scrub.py index aeb6139b3a56..cafb31c39888 100644 --- a/numpy/linalg/lapack_lite/clapack_scrub.py +++ b/numpy/linalg/lapack_lite/clapack_scrub.py @@ -305,7 +305,7 @@ def scrubSource(source, nsteps=None, verbose=False): else: nsteps = None - source = scrub_source(source, nsteps, verbose=True) + source = scrubSource(source, nsteps, verbose=True) with open(outfilename, 'w') as writefo: writefo.write(source) diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index ffd9550e7c1d..0745654a0730 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -1855,8 +1855,8 @@ def test_basic_property(self, shape, dtype, upper): b = np.matmul(c.transpose(t).conj(), c) else: b = np.matmul(c, c.transpose(t).conj()) - with np._no_nep50_warning(): - atol = 500 * a.shape[0] * np.finfo(dtype).eps + + atol = 500 * a.shape[0] * np.finfo(dtype).eps assert_allclose(b, a, atol=atol, err_msg=f'{shape} {dtype}\n{a}\n{c}') # Check diag(L or U) is real and positive diff --git a/numpy/linalg/tests/test_regression.py b/numpy/linalg/tests/test_regression.py index 8cac195aa864..7dd058e0fd1e 100644 --- 
a/numpy/linalg/tests/test_regression.py +++ b/numpy/linalg/tests/test_regression.py @@ -1,6 +1,5 @@ """ Test functions for linalg module """ -import warnings import pytest @@ -161,3 +160,18 @@ def test_matrix_rank_rtol_argument(self, rtol): x = np.zeros((4, 3, 2)) res = np.linalg.matrix_rank(x, rtol=rtol) assert res.shape == (4,) + + def test_openblas_threading(self): + # gh-27036 + # Test whether matrix multiplication involving a large matrix always + # gives the same (correct) answer + x = np.arange(500000, dtype=np.float64) + src = np.vstack((x, -10*x)).T + matrix = np.array([[0, 1], [1, 0]]) + expected = np.vstack((-10*x, x)).T # src @ matrix + for i in range(200): + result = src @ matrix + mismatches = (~np.isclose(result, expected)).sum() + if mismatches != 0: + assert False, ("unexpected result from matmul, " + "probably due to OpenBLAS threading issues") diff --git a/numpy/ma/__init__.pyi b/numpy/ma/__init__.pyi index 805842a892e5..7e38d1793460 100644 --- a/numpy/ma/__init__.pyi +++ b/numpy/ma/__init__.pyi @@ -1,233 +1,458 @@ -from numpy._pytesttester import PytestTester - -from numpy.ma import extras as extras - -from numpy.ma.core import ( - MAError as MAError, - MaskError as MaskError, - MaskType as MaskType, - MaskedArray as MaskedArray, - abs as abs, - absolute as absolute, - add as add, - all as all, - allclose as allclose, - allequal as allequal, - alltrue as alltrue, - amax as amax, - amin as amin, - angle as angle, - anom as anom, - anomalies as anomalies, - any as any, - append as append, - arange as arange, - arccos as arccos, - arccosh as arccosh, - arcsin as arcsin, - arcsinh as arcsinh, - arctan as arctan, - arctan2 as arctan2, - arctanh as arctanh, - argmax as argmax, - argmin as argmin, - argsort as argsort, - around as around, - array as array, - asanyarray as asanyarray, - asarray as asarray, - bitwise_and as bitwise_and, - bitwise_or as bitwise_or, - bitwise_xor as bitwise_xor, - bool as bool, - ceil as ceil, - choose as choose, - clip as clip, - common_fill_value as common_fill_value, - compress as compress, - compressed as compressed, - concatenate as concatenate, - conjugate as conjugate, - convolve as convolve, - copy as copy, - correlate as correlate, - cos as cos, - cosh as cosh, - count as count, - cumprod as cumprod, - cumsum as cumsum, - default_fill_value as default_fill_value, - diag as diag, - diagonal as diagonal, - diff as diff, - divide as divide, - empty as empty, - empty_like as empty_like, - equal as equal, - exp as exp, - expand_dims as expand_dims, - fabs as fabs, - filled as filled, - fix_invalid as fix_invalid, - flatten_mask as flatten_mask, - flatten_structured_array as flatten_structured_array, - floor as floor, - floor_divide as floor_divide, - fmod as fmod, - frombuffer as frombuffer, - fromflex as fromflex, - fromfunction as fromfunction, - getdata as getdata, - getmask as getmask, - getmaskarray as getmaskarray, - greater as greater, - greater_equal as greater_equal, - harden_mask as harden_mask, - hypot as hypot, - identity as identity, - ids as ids, - indices as indices, - inner as inner, - innerproduct as innerproduct, - isMA as isMA, - isMaskedArray as isMaskedArray, - is_mask as is_mask, - is_masked as is_masked, - isarray as isarray, - left_shift as left_shift, - less as less, - less_equal as less_equal, - log as log, - log10 as log10, - log2 as log2, - logical_and as logical_and, - logical_not as logical_not, - logical_or as logical_or, - logical_xor as logical_xor, - make_mask as make_mask, - make_mask_descr as 
make_mask_descr, - make_mask_none as make_mask_none, - mask_or as mask_or, - masked as masked, - masked_array as masked_array, - masked_equal as masked_equal, - masked_greater as masked_greater, - masked_greater_equal as masked_greater_equal, - masked_inside as masked_inside, - masked_invalid as masked_invalid, - masked_less as masked_less, - masked_less_equal as masked_less_equal, - masked_not_equal as masked_not_equal, - masked_object as masked_object, - masked_outside as masked_outside, - masked_print_option as masked_print_option, - masked_singleton as masked_singleton, - masked_values as masked_values, - masked_where as masked_where, - max as max, - maximum as maximum, - maximum_fill_value as maximum_fill_value, - mean as mean, - min as min, - minimum as minimum, - minimum_fill_value as minimum_fill_value, - mod as mod, - multiply as multiply, - mvoid as mvoid, - ndim as ndim, - negative as negative, - nomask as nomask, - nonzero as nonzero, - not_equal as not_equal, - ones as ones, - outer as outer, - outerproduct as outerproduct, - power as power, - prod as prod, - product as product, - ptp as ptp, - put as put, - putmask as putmask, - ravel as ravel, - remainder as remainder, - repeat as repeat, - reshape as reshape, - resize as resize, - right_shift as right_shift, - round as round, - set_fill_value as set_fill_value, - shape as shape, - sin as sin, - sinh as sinh, - size as size, - soften_mask as soften_mask, - sometrue as sometrue, - sort as sort, - sqrt as sqrt, - squeeze as squeeze, - std as std, - subtract as subtract, - sum as sum, - swapaxes as swapaxes, - take as take, - tan as tan, - tanh as tanh, - trace as trace, - transpose as transpose, - true_divide as true_divide, - var as var, - where as where, - zeros as zeros, +from . import core, extras +from .core import ( + MAError, + MaskError, + MaskType, + MaskedArray, + abs, + absolute, + add, + all, + allclose, + allequal, + alltrue, + amax, + amin, + angle, + anom, + anomalies, + any, + append, + arange, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + argmax, + argmin, + argsort, + around, + array, + asanyarray, + asarray, + bool_, + bitwise_and, + bitwise_or, + bitwise_xor, + ceil, + choose, + clip, + common_fill_value, + compress, + compressed, + concatenate, + conjugate, + convolve, + copy, + correlate, + cos, + cosh, + count, + cumprod, + cumsum, + default_fill_value, + diag, + diagonal, + diff, + divide, + empty, + empty_like, + equal, + exp, + expand_dims, + fabs, + filled, + fix_invalid, + flatten_mask, + flatten_structured_array, + floor, + floor_divide, + fmod, + frombuffer, + fromflex, + fromfunction, + getdata, + getmask, + getmaskarray, + greater, + greater_equal, + harden_mask, + hypot, + identity, + ids, + indices, + inner, + innerproduct, + isMA, + isMaskedArray, + is_mask, + is_masked, + isarray, + left_shift, + less, + less_equal, + log, + log10, + log2, + logical_and, + logical_not, + logical_or, + logical_xor, + make_mask, + make_mask_descr, + make_mask_none, + mask_or, + masked, + masked_array, + masked_equal, + masked_greater, + masked_greater_equal, + masked_inside, + masked_invalid, + masked_less, + masked_less_equal, + masked_not_equal, + masked_object, + masked_outside, + masked_print_option, + masked_singleton, + masked_values, + masked_where, + max, + maximum, + maximum_fill_value, + mean, + min, + minimum, + minimum_fill_value, + mod, + multiply, + mvoid, + ndim, + negative, + nomask, + nonzero, + not_equal, + ones, + ones_like, + outer, + outerproduct, + power, + 
prod, + product, + ptp, + put, + putmask, + ravel, + remainder, + repeat, + reshape, + resize, + right_shift, + round, + round_, + set_fill_value, + shape, + sin, + sinh, + size, + soften_mask, + sometrue, + sort, + sqrt, + squeeze, + std, + subtract, + sum, + swapaxes, + take, + tan, + tanh, + trace, + transpose, + true_divide, + var, + where, + zeros, + zeros_like, ) - -from numpy.ma.extras import ( - apply_along_axis as apply_along_axis, - apply_over_axes as apply_over_axes, - atleast_1d as atleast_1d, - atleast_2d as atleast_2d, - atleast_3d as atleast_3d, - average as average, - clump_masked as clump_masked, - clump_unmasked as clump_unmasked, - column_stack as column_stack, - compress_cols as compress_cols, - compress_nd as compress_nd, - compress_rowcols as compress_rowcols, - compress_rows as compress_rows, - count_masked as count_masked, - corrcoef as corrcoef, - cov as cov, - diagflat as diagflat, - dot as dot, - dstack as dstack, - ediff1d as ediff1d, - flatnotmasked_contiguous as flatnotmasked_contiguous, - flatnotmasked_edges as flatnotmasked_edges, - hsplit as hsplit, - hstack as hstack, - isin as isin, - in1d as in1d, - intersect1d as intersect1d, - mask_cols as mask_cols, - mask_rowcols as mask_rowcols, - mask_rows as mask_rows, - masked_all as masked_all, - masked_all_like as masked_all_like, - median as median, - mr_ as mr_, - ndenumerate as ndenumerate, - notmasked_contiguous as notmasked_contiguous, - notmasked_edges as notmasked_edges, - polyfit as polyfit, - row_stack as row_stack, - setdiff1d as setdiff1d, - setxor1d as setxor1d, - stack as stack, - unique as unique, - union1d as union1d, - vander as vander, - vstack as vstack, +from .extras import ( + apply_along_axis, + apply_over_axes, + atleast_1d, + atleast_2d, + atleast_3d, + average, + clump_masked, + clump_unmasked, + column_stack, + compress_cols, + compress_nd, + compress_rowcols, + compress_rows, + count_masked, + corrcoef, + cov, + diagflat, + dot, + dstack, + ediff1d, + flatnotmasked_contiguous, + flatnotmasked_edges, + hsplit, + hstack, + isin, + in1d, + intersect1d, + mask_cols, + mask_rowcols, + mask_rows, + masked_all, + masked_all_like, + median, + mr_, + ndenumerate, + notmasked_contiguous, + notmasked_edges, + polyfit, + row_stack, + setdiff1d, + setxor1d, + stack, + unique, + union1d, + vander, + vstack, ) -__all__: list[str] -test: PytestTester +__all__ = [ + "core", + "extras", + "MAError", + "MaskError", + "MaskType", + "MaskedArray", + "abs", + "absolute", + "add", + "all", + "allclose", + "allequal", + "alltrue", + "amax", + "amin", + "angle", + "anom", + "anomalies", + "any", + "append", + "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "argmax", + "argmin", + "argsort", + "around", + "array", + "asanyarray", + "asarray", + "bitwise_and", + "bitwise_or", + "bitwise_xor", + "bool_", + "ceil", + "choose", + "clip", + "common_fill_value", + "compress", + "compressed", + "concatenate", + "conjugate", + "convolve", + "copy", + "correlate", + "cos", + "cosh", + "count", + "cumprod", + "cumsum", + "default_fill_value", + "diag", + "diagonal", + "diff", + "divide", + "empty", + "empty_like", + "equal", + "exp", + "expand_dims", + "fabs", + "filled", + "fix_invalid", + "flatten_mask", + "flatten_structured_array", + "floor", + "floor_divide", + "fmod", + "frombuffer", + "fromflex", + "fromfunction", + "getdata", + "getmask", + "getmaskarray", + "greater", + "greater_equal", + "harden_mask", + "hypot", + "identity", + "ids", + "indices", + "inner", 
+ "innerproduct", + "isMA", + "isMaskedArray", + "is_mask", + "is_masked", + "isarray", + "left_shift", + "less", + "less_equal", + "log", + "log10", + "log2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "make_mask", + "make_mask_descr", + "make_mask_none", + "mask_or", + "masked", + "masked_array", + "masked_equal", + "masked_greater", + "masked_greater_equal", + "masked_inside", + "masked_invalid", + "masked_less", + "masked_less_equal", + "masked_not_equal", + "masked_object", + "masked_outside", + "masked_print_option", + "masked_singleton", + "masked_values", + "masked_where", + "max", + "maximum", + "maximum_fill_value", + "mean", + "min", + "minimum", + "minimum_fill_value", + "mod", + "multiply", + "mvoid", + "ndim", + "negative", + "nomask", + "nonzero", + "not_equal", + "ones", + "ones_like", + "outer", + "outerproduct", + "power", + "prod", + "product", + "ptp", + "put", + "putmask", + "ravel", + "remainder", + "repeat", + "reshape", + "resize", + "right_shift", + "round", + "round_", + "set_fill_value", + "shape", + "sin", + "sinh", + "size", + "soften_mask", + "sometrue", + "sort", + "sqrt", + "squeeze", + "std", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "trace", + "transpose", + "true_divide", + "var", + "where", + "zeros", + "zeros_like", + "apply_along_axis", + "apply_over_axes", + "atleast_1d", + "atleast_2d", + "atleast_3d", + "average", + "clump_masked", + "clump_unmasked", + "column_stack", + "compress_cols", + "compress_nd", + "compress_rowcols", + "compress_rows", + "count_masked", + "corrcoef", + "cov", + "diagflat", + "dot", + "dstack", + "ediff1d", + "flatnotmasked_contiguous", + "flatnotmasked_edges", + "hsplit", + "hstack", + "isin", + "in1d", + "intersect1d", + "mask_cols", + "mask_rowcols", + "mask_rows", + "masked_all", + "masked_all_like", + "median", + "mr_", + "ndenumerate", + "notmasked_contiguous", + "notmasked_edges", + "polyfit", + "row_stack", + "setdiff1d", + "setxor1d", + "stack", + "unique", + "union1d", + "vander", + "vstack", +] diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 01eb8f9415a9..97d6c9eafa5a 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -21,12 +21,12 @@ """ # pylint: disable-msg=E1002 import builtins +import functools import inspect import operator import warnings import textwrap import re -from functools import reduce from typing import Dict import numpy as np @@ -193,7 +193,7 @@ class MaskError(MAError): if scalar_dtype.kind in "Mm": info = np.iinfo(np.int64) - min_val, max_val = info.min, info.max + min_val, max_val = info.min + 1, info.max elif np.issubdtype(scalar_dtype, np.integer): info = np.iinfo(sctype) min_val, max_val = info.min, info.max @@ -469,6 +469,16 @@ def _check_fill_value(fill_value, ndtype): ndtype = np.dtype(ndtype) if fill_value is None: fill_value = default_fill_value(ndtype) + # TODO: It seems better to always store a valid fill_value, the oddity + # about is that `_fill_value = None` would behave even more + # different then. + # (e.g. this allows arr_uint8.astype(int64) to have the default + # fill value again...) + # The one thing that changed in 2.0/2.1 around cast safety is that the + # default `int(99...)` is not a same-kind cast anymore, so if we + # have a uint, use the default uint. 
+ if ndtype.kind == "u": + fill_value = np.uint(fill_value) elif ndtype.names is not None: if isinstance(fill_value, (ndarray, np.void)): try: @@ -929,6 +939,7 @@ def __init__(self, ufunc): self.f = ufunc self.__doc__ = ufunc.__doc__ self.__name__ = ufunc.__name__ + self.__qualname__ = ufunc.__qualname__ def __str__(self): return f"Masked version of {self.f}" @@ -1787,7 +1798,7 @@ def mask_or(m1, m2, copy=False, shrink=True): dtype = getattr(m1, 'dtype', MaskType) return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype) if m1 is m2 and is_mask(m1): - return m1 + return _shrink_mask(m1) if shrink else m1 (dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None)) if dtype1 != dtype2: raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2)) @@ -1853,7 +1864,7 @@ def _flatsequence(sequence): mask = np.asarray(mask) flattened = _flatsequence(_flatmask(mask)) - return np.array([_ for _ in flattened], dtype=bool) + return np.array(list(flattened), dtype=bool) def _check_mask_axis(mask, axis, keepdims=np._NoValue): @@ -3147,13 +3158,17 @@ def __array_wrap__(self, obj, context=None, return_scalar=False): func, args, out_i = context # args sometimes contains outputs (gh-10459), which we don't want input_args = args[:func.nin] - m = reduce(mask_or, [getmaskarray(arg) for arg in input_args]) + m = functools.reduce(mask_or, [getmaskarray(arg) for arg in input_args]) # Get the domain mask domain = ufunc_domain.get(func) if domain is not None: # Take the domain, and make sure it's a ndarray with np.errstate(divide='ignore', invalid='ignore'): - d = filled(domain(*input_args), True) + # The result may be masked for two (unary) domains. + # That can't really be right as some domains drop + # the mask and some don't behaving differently here. + d = domain(*input_args).astype(bool, copy=False) + d = filled(d, True) if d.any(): # Fill the result where the domain is wrong @@ -4626,9 +4641,6 @@ def count(self, axis=None, keepdims=np._NoValue): The default, None, performs the count over all the dimensions of the input array. `axis` may be negative, in which case it counts from the last to the first axis. - - .. versionadded:: 1.10.0 - If this is a tuple of ints, the count is performed on multiple axes, instead of a single axis or all the axes as before. keepdims : bool, optional @@ -5198,8 +5210,6 @@ def dot(self, b, out=None, strict=False): recommended that the optional arguments be treated as keyword only. At some point that may be mandatory. - .. versionadded:: 1.10.0 - Parameters ---------- b : masked_array_like @@ -5218,8 +5228,6 @@ def dot(self, b, out=None, strict=False): means that if a masked value appears in a row or column, the whole row or column is considered masked. - .. versionadded:: 1.10.2 - See Also -------- numpy.ma.dot : equivalent function @@ -5648,13 +5656,6 @@ def argsort(self, axis=np._NoValue, kind=None, order=None, endwith=True, axis : int, optional Axis along which to sort. If None, the default, the flattened array is used. - - .. versionchanged:: 1.13.0 - Previously, the default was documented to be -1, but that was - in error. At some future date, the default will change to -1, as - originally intended. - Until then, the axis should be given explicitly when - ``arr.ndim > 1``, to avoid a FutureWarning. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional The sorting algorithm used. 
order : list, optional @@ -5911,7 +5912,6 @@ def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): axis : None or int or tuple of ints, optional Axis along which to operate. By default, ``axis`` is None and the flattened input is used. - .. versionadded:: 1.7.0 If this is a tuple of ints, the minimum is selected over multiple axes, instead of a single axis or all the axes as before. out : array_like, optional @@ -5987,7 +5987,7 @@ def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): result = masked return result # Explicit output - result = self.filled(fill_value).min(axis=axis, out=out, **kwargs) + self.filled(fill_value).min(axis=axis, out=out, **kwargs) if isinstance(out, MaskedArray): outmask = getmask(out) if outmask is nomask: @@ -6010,7 +6010,6 @@ def max(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): axis : None or int or tuple of ints, optional Axis along which to operate. By default, ``axis`` is None and the flattened input is used. - .. versionadded:: 1.7.0 If this is a tuple of ints, the maximum is selected over multiple axes, instead of a single axis or all the axes as before. out : array_like, optional @@ -6093,7 +6092,7 @@ def max(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): result = masked return result # Explicit output - result = self.filled(fill_value).max(axis=axis, out=out, **kwargs) + self.filled(fill_value).max(axis=axis, out=out, **kwargs) if isinstance(out, MaskedArray): outmask = getmask(out) if outmask is nomask: @@ -6181,7 +6180,7 @@ def ptp(self, axis=None, out=None, fill_value=None, keepdims=False): >>> y.ptp(axis=1).view(np.uint8) masked_array(data=[126, 127, 128, 129], mask=False, - fill_value=np.int64(999999), + fill_value=np.uint64(999999), dtype=uint8) """ if out is None: @@ -6211,7 +6210,72 @@ def argpartition(self, *args, **kwargs): def take(self, indices, axis=None, out=None, mode='raise'): """ - """ + Take elements from a masked array along an axis. + + This function does the same thing as "fancy" indexing (indexing arrays + using arrays) for masked arrays. It can be easier to use if you need + elements along a given axis. + + Parameters + ---------- + a : masked_array + The source masked array. + indices : array_like + The indices of the values to extract. Also allow scalars for indices. + axis : int, optional + The axis over which to select values. By default, the flattened + input array is used. + out : MaskedArray, optional + If provided, the result will be placed in this array. It should + be of the appropriate shape and dtype. Note that `out` is always + buffered if `mode='raise'`; use other modes for better performance. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + + * 'raise' -- raise an error (default) + * 'wrap' -- wrap around + * 'clip' -- clip to the range + + 'clip' mode means that all indices that are too large are replaced + by the index that addresses the last element along that axis. Note + that this disables indexing with negative numbers. + + Returns + ------- + out : MaskedArray + The returned array has the same type as `a`. + + See Also + -------- + numpy.take : Equivalent function for ndarrays. + compress : Take elements using a boolean mask. + take_along_axis : Take elements by matching the array and the index arrays. + + Notes + ----- + This function behaves similarly to `numpy.take`, but it handles masked + values. 
The mask is retained in the output array, and masked values + in the input array remain masked in the output. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([4, 3, 5, 7, 6, 8], mask=[0, 0, 1, 0, 1, 0]) + >>> indices = [0, 1, 4] + >>> np.ma.take(a, indices) + masked_array(data=[4, 3, --], + mask=[False, False, True], + fill_value=999999) + + When `indices` is not one-dimensional, the output also has these dimensions: + + >>> np.ma.take(a, [[0, 1], [2, 3]]) + masked_array(data=[[4, 3], + [--, 7]], + mask=[[False, False], + [ True, False]], + fill_value=999999) + """ (_data, _mask) = (self._data, self._mask) cls = type(self) # Make sure the indices are not masked @@ -6353,8 +6417,6 @@ def tobytes(self, fill_value=None, order='C'): The array is filled with a fill value before the string conversion. - .. versionadded:: 1.9.0 - Parameters ---------- fill_value : scalar, optional @@ -7038,6 +7100,7 @@ class _frommethod: def __init__(self, methodname, reversed=False): self.__name__ = methodname + self.__qualname__ = methodname self.__doc__ = self.getdoc() self.reversed = reversed @@ -7097,6 +7160,7 @@ def __call__(self, a, *args, **params): def take(a, indices, axis=None, out=None, mode='raise'): """ + """ a = masked_array(a) return a.take(indices, axis=axis, out=out, mode=mode) @@ -7805,10 +7869,10 @@ def diff(a, /, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): >>> np.ma.diff(u8_arr) masked_array(data=[255], mask=False, - fill_value=np.int64(999999), + fill_value=np.uint64(999999), dtype=uint8) >>> u8_arr[1,...] - u8_arr[0,...] - 255 + np.uint8(255) If this is not desirable, then the array should be cast to a larger integer type first: @@ -8163,8 +8227,6 @@ def dot(a, b, strict=False, out=None): conditions are not met, an exception is raised, instead of attempting to be flexible. - .. versionadded:: 1.10.2 - See Also -------- numpy.dot : Equivalent function for ndarrays. @@ -8858,8 +8920,6 @@ def __call__(self, *args, **params): def append(a, b, axis=None): """Append values to the end of an array. - .. 
versionadded:: 1.9.0 - Parameters ---------- a : array_like diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 826250d4c3a8..57136fa9d31c 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1,24 +1,204 @@ from collections.abc import Callable from typing import Any, TypeVar -from numpy import ndarray, dtype, float64 from numpy import ( - amax as amax, - amin as amin, - bool as bool, - expand_dims as expand_dims, - clip as clip, - indices as indices, - ones_like as ones_like, - squeeze as squeeze, - zeros_like as zeros_like, - angle as angle + amax, + amin, + bool_, + expand_dims, + clip, + indices, + squeeze, + angle, + ndarray, + dtype, + float64, ) +__all__ = [ + "MAError", + "MaskError", + "MaskType", + "MaskedArray", + "abs", + "absolute", + "add", + "all", + "allclose", + "allequal", + "alltrue", + "amax", + "amin", + "angle", + "anom", + "anomalies", + "any", + "append", + "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "argmax", + "argmin", + "argsort", + "around", + "array", + "asanyarray", + "asarray", + "bitwise_and", + "bitwise_or", + "bitwise_xor", + "bool_", + "ceil", + "choose", + "clip", + "common_fill_value", + "compress", + "compressed", + "concatenate", + "conjugate", + "convolve", + "copy", + "correlate", + "cos", + "cosh", + "count", + "cumprod", + "cumsum", + "default_fill_value", + "diag", + "diagonal", + "diff", + "divide", + "empty", + "empty_like", + "equal", + "exp", + "expand_dims", + "fabs", + "filled", + "fix_invalid", + "flatten_mask", + "flatten_structured_array", + "floor", + "floor_divide", + "fmod", + "frombuffer", + "fromflex", + "fromfunction", + "getdata", + "getmask", + "getmaskarray", + "greater", + "greater_equal", + "harden_mask", + "hypot", + "identity", + "ids", + "indices", + "inner", + "innerproduct", + "isMA", + "isMaskedArray", + "is_mask", + "is_masked", + "isarray", + "left_shift", + "less", + "less_equal", + "log", + "log10", + "log2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "make_mask", + "make_mask_descr", + "make_mask_none", + "mask_or", + "masked", + "masked_array", + "masked_equal", + "masked_greater", + "masked_greater_equal", + "masked_inside", + "masked_invalid", + "masked_less", + "masked_less_equal", + "masked_not_equal", + "masked_object", + "masked_outside", + "masked_print_option", + "masked_singleton", + "masked_values", + "masked_where", + "max", + "maximum", + "maximum_fill_value", + "mean", + "min", + "minimum", + "minimum_fill_value", + "mod", + "multiply", + "mvoid", + "ndim", + "negative", + "nomask", + "nonzero", + "not_equal", + "ones", + "ones_like", + "outer", + "outerproduct", + "power", + "prod", + "product", + "ptp", + "put", + "putmask", + "ravel", + "remainder", + "repeat", + "reshape", + "resize", + "right_shift", + "round", + "round_", + "set_fill_value", + "shape", + "sin", + "sinh", + "size", + "soften_mask", + "sometrue", + "sort", + "sqrt", + "squeeze", + "std", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "trace", + "transpose", + "true_divide", + "var", + "where", + "zeros", + "zeros_like", +] + _ShapeType_co = TypeVar("_ShapeType_co", bound=tuple[int, ...], covariant=True) _DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True) -__all__: list[str] MaskType = bool nomask: bool @@ -431,7 +611,8 @@ def size(obj, axis=...): ... def diff(a, /, n=..., axis=..., prepend=..., append=...): ... def where(condition, x=..., y=...): ... def choose(indices, choices, out=..., mode=...): ... 
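
The `round_` stub just below mirrors the runtime aliasing in `numpy.ma.core`, where `round` is bound to `round_`. A quick sketch of the aliased behavior:

    >>> import numpy as np
    >>> np.ma.round is np.ma.round_
    True
    >>> r = np.ma.round(np.ma.array([1.26, 2.78], mask=[0, 1]), decimals=1)
    >>> float(r[0]), r[1] is np.ma.masked
    (1.3, True)
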
-def round(a, decimals=..., out=...): ... +def round_(a, decimals=..., out=...): ... +round = round_ def inner(a, b): ... innerproduct = inner @@ -460,7 +641,9 @@ frombuffer: _convert2ma fromfunction: _convert2ma identity: _convert2ma ones: _convert2ma +ones_like: _convert2ma zeros: _convert2ma +zeros_like: _convert2ma def append(a, b, axis=...): ... def dot(a, b, strict=..., out=...): ... diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 8d41e939632f..bdc35c424ce3 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -35,7 +35,6 @@ from numpy.lib.array_utils import normalize_axis_index, normalize_axis_tuple from numpy.lib._function_base_impl import _ureduce from numpy.lib._index_tricks_impl import AxisConcatenator -from numpy._core.numeric import normalize_axis_tuple def issequence(seq): @@ -250,6 +249,7 @@ class _fromnxfunction: def __init__(self, funcname): self.__name__ = funcname + self.__qualname__ = funcname self.__doc__ = self.getdoc() def getdoc(self): @@ -743,8 +743,6 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array. - .. versionadded:: 1.10.0 - Returns ------- median : ndarray @@ -1438,10 +1436,6 @@ def in1d(ar1, ar2, assume_unique=False, invert=False): isin : Version of this function that preserves the shape of ar1. numpy.in1d : Equivalent function for ndarrays. - Notes - ----- - .. versionadded:: 1.4.0 - Examples -------- >>> import numpy as np @@ -1489,10 +1483,6 @@ def isin(element, test_elements, assume_unique=False, invert=False): in1d : Flattened version of this function. numpy.isin : Equivalent function for ndarrays. - Notes - ----- - .. versionadded:: 1.13.0 - Examples -------- >>> import numpy as np @@ -1667,8 +1657,6 @@ def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): the number of observations; this overrides the value implied by ``bias``. The default value is ``None``. - .. versionadded:: 1.5 - Raises ------ ValueError @@ -2057,9 +2045,6 @@ def flatnotmasked_contiguous(a): slice_list : list A sorted sequence of `slice` objects (start index, end index). - .. versionchanged:: 1.15.0 - Now returns an empty list instead of None for a fully masked array - See Also -------- flatnotmasked_edges, notmasked_contiguous, notmasked_edges @@ -2224,10 +2209,6 @@ def clump_unmasked(a): The list of slices, one for each continuous region of unmasked elements in `a`. - Notes - ----- - .. versionadded:: 1.4.0 - See Also -------- flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges @@ -2264,10 +2245,6 @@ def clump_masked(a): The list of slices, one for each continuous region of masked elements in `a`. - Notes - ----- - .. 
versionadded:: 1.4.0 - See Also -------- flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 8e458fe165af..df69cd5d3465 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -1,13 +1,56 @@ from typing import Any from numpy.lib._index_tricks_impl import AxisConcatenator - -from numpy.ma.core import ( - dot as dot, - mask_rowcols as mask_rowcols, -) - -__all__: list[str] +from .core import dot, mask_rowcols + +__all__ = [ + "apply_along_axis", + "apply_over_axes", + "atleast_1d", + "atleast_2d", + "atleast_3d", + "average", + "clump_masked", + "clump_unmasked", + "column_stack", + "compress_cols", + "compress_nd", + "compress_rowcols", + "compress_rows", + "count_masked", + "corrcoef", + "cov", + "diagflat", + "dot", + "dstack", + "ediff1d", + "flatnotmasked_contiguous", + "flatnotmasked_edges", + "hsplit", + "hstack", + "isin", + "in1d", + "intersect1d", + "mask_cols", + "mask_rowcols", + "mask_rows", + "masked_all", + "masked_all_like", + "median", + "mr_", + "ndenumerate", + "notmasked_contiguous", + "notmasked_edges", + "polyfit", + "row_stack", + "setdiff1d", + "setxor1d", + "stack", + "unique", + "union1d", + "vander", + "vstack", +] def count_masked(arr, axis=...): ... def masked_all(shape, dtype = ...): ... diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py index 4eb92b6bd7b0..10e9e834cb88 100644 --- a/numpy/ma/mrecords.py +++ b/numpy/ma/mrecords.py @@ -13,19 +13,11 @@ # first place, and then rename the invalid fields with a trailing # underscore. Maybe we could just overload the parser function ? -from numpy.ma import ( - MAError, MaskedArray, masked, nomask, masked_array, getdata, - getmaskarray, filled -) -import numpy.ma as ma import warnings import numpy as np -from numpy import dtype, ndarray, array as narray +import numpy.ma as ma -from numpy._core.records import ( - recarray, fromarrays as recfromarrays, fromrecords as recfromrecords -) _byteorderconv = np._core.records._byteorderconv @@ -82,7 +74,7 @@ def _get_fieldmask(self): return fdmask -class MaskedRecords(MaskedArray): +class MaskedRecords(ma.MaskedArray): """ Attributes @@ -103,17 +95,17 @@ class MaskedRecords(MaskedArray): def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None, formats=None, names=None, titles=None, byteorder=None, aligned=False, - mask=nomask, hard_mask=False, fill_value=None, keep_mask=True, + mask=ma.nomask, hard_mask=False, fill_value=None, keep_mask=True, copy=False, **options): - self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset, - strides=strides, formats=formats, names=names, - titles=titles, byteorder=byteorder, - aligned=aligned,) + self = np.recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset, + strides=strides, formats=formats, names=names, + titles=titles, byteorder=byteorder, + aligned=aligned,) mdtype = ma.make_mask_descr(self.dtype) - if mask is nomask or not np.size(mask): + if mask is ma.nomask or not np.size(mask): if not keep_mask: self._mask = tuple([False] * len(mdtype)) else: @@ -127,7 +119,7 @@ def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None, else: msg = "Mask and data not compatible: data size is %i, " + \ "mask size is %i." 
- raise MAError(msg % (nd, nm)) + raise ma.MAError(msg % (nd, nm)) if not keep_mask: self.__setmask__(mask) self._sharedmask = True @@ -144,20 +136,20 @@ def __array_finalize__(self, obj): # Make sure we have a _fieldmask by default _mask = getattr(obj, '_mask', None) if _mask is None: - objmask = getattr(obj, '_mask', nomask) - _dtype = ndarray.__getattribute__(self, 'dtype') - if objmask is nomask: + objmask = getattr(obj, '_mask', ma.nomask) + _dtype = np.ndarray.__getattribute__(self, 'dtype') + if objmask is ma.nomask: _mask = ma.make_mask_none(self.shape, dtype=_dtype) else: mdescr = ma.make_mask_descr(_dtype) - _mask = narray([tuple([m] * len(mdescr)) for m in objmask], - dtype=mdescr).view(recarray) + _mask = np.array([tuple([m] * len(mdescr)) for m in objmask], + dtype=mdescr).view(np.recarray) # Update some of the attributes _dict = self.__dict__ _dict.update(_mask=_mask) self._update_from(obj) - if _dict['_baseclass'] == ndarray: - _dict['_baseclass'] = recarray + if _dict['_baseclass'] == np.ndarray: + _dict['_baseclass'] = np.recarray return @property @@ -166,7 +158,7 @@ def _data(self): Returns the data as a recarray. """ - return ndarray.view(self, recarray) + return np.ndarray.view(self, np.recarray) @property def _fieldmask(self): @@ -193,15 +185,15 @@ def __getattribute__(self, attr): except AttributeError: # attr must be a fieldname pass - fielddict = ndarray.__getattribute__(self, 'dtype').fields + fielddict = np.ndarray.__getattribute__(self, 'dtype').fields try: res = fielddict[attr][:2] except (TypeError, KeyError) as e: raise AttributeError( f'record array has no attribute {attr}') from e # So far, so good - _localdict = ndarray.__getattribute__(self, '__dict__') - _data = ndarray.view(self, _localdict['_baseclass']) + _localdict = np.ndarray.__getattribute__(self, '__dict__') + _data = np.ndarray.view(self, _localdict['_baseclass']) obj = _data.getfield(*res) if obj.dtype.names is not None: raise NotImplementedError("MaskedRecords is currently limited to" @@ -219,8 +211,8 @@ def __getattribute__(self, attr): tp_len = len(_mask.dtype) hasmasked = _mask.view((bool, ((tp_len,) if tp_len else ()))).any() if (obj.shape or hasmasked): - obj = obj.view(MaskedArray) - obj._baseclass = ndarray + obj = obj.view(ma.MaskedArray) + obj._baseclass = np.ndarray obj._isfield = True obj._mask = _mask # Reset the field values @@ -252,13 +244,13 @@ def __setattr__(self, attr, val): ret = object.__setattr__(self, attr, val) except Exception: # Not a generic attribute: exit if it's not a valid field - fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} - optinfo = ndarray.__getattribute__(self, '_optinfo') or {} + fielddict = np.ndarray.__getattribute__(self, 'dtype').fields or {} + optinfo = np.ndarray.__getattribute__(self, '_optinfo') or {} if not (attr in fielddict or attr in optinfo): raise else: # Get the list of names - fielddict = ndarray.__getattribute__(self, 'dtype').fields or {} + fielddict = np.ndarray.__getattribute__(self, 'dtype').fields or {} # Check the attribute if attr not in fielddict: return ret @@ -276,7 +268,7 @@ def __setattr__(self, attr, val): raise AttributeError( f'record array has no attribute {attr}') from e - if val is masked: + if val is ma.masked: _fill_value = _localdict['_fill_value'] if _fill_value is not None: dval = _localdict['_fill_value'][attr] @@ -284,9 +276,9 @@ def __setattr__(self, attr, val): dval = val mval = True else: - dval = filled(val) - mval = getmaskarray(val) - obj = ndarray.__getattribute__(self, 
'_data').setfield(dval, *res) + dval = ma.filled(val) + mval = ma.getmaskarray(val) + obj = np.ndarray.__getattribute__(self, '_data').setfield(dval, *res) _localdict['_mask'].__setitem__(attr, mval) return obj @@ -298,15 +290,15 @@ def __getitem__(self, indx): """ _localdict = self.__dict__ - _mask = ndarray.__getattribute__(self, '_mask') - _data = ndarray.view(self, _localdict['_baseclass']) + _mask = np.ndarray.__getattribute__(self, '_mask') + _data = np.ndarray.view(self, _localdict['_baseclass']) # We want a field if isinstance(indx, str): # Make sure _sharedmask is True to propagate back to _fieldmask # Don't use _set_mask, there are some copies being made that # break propagation Don't force the mask to nomask, that wreaks # easy masking - obj = _data[indx].view(MaskedArray) + obj = _data[indx].view(ma.MaskedArray) obj._mask = _mask[indx] obj._sharedmask = True fval = _localdict['_fill_value'] @@ -314,12 +306,12 @@ def __getitem__(self, indx): obj._fill_value = fval[indx] # Force to masked if the mask is True if not obj.ndim and obj._mask: - return masked + return ma.masked return obj # We want some elements. # First, the data. obj = np.asarray(_data[indx]).view(mrecarray) - obj._mask = np.asarray(_mask[indx]).view(recarray) + obj._mask = np.asarray(_mask[indx]).view(np.recarray) return obj def __setitem__(self, indx, value): @@ -327,7 +319,7 @@ def __setitem__(self, indx, value): Sets the given record to value. """ - MaskedArray.__setitem__(self, indx, value) + ma.MaskedArray.__setitem__(self, indx, value) if isinstance(indx, str): self._mask[indx] = ma.getmaskarray(value) @@ -366,16 +358,16 @@ def view(self, dtype=None, type=None): # OK, basic copy-paste from MaskedArray.view. if dtype is None: if type is None: - output = ndarray.view(self) + output = np.ndarray.view(self) else: - output = ndarray.view(self, type) + output = np.ndarray.view(self, type) # Here again. 
elif type is None: try: - if issubclass(dtype, ndarray): - output = ndarray.view(self, dtype) + if issubclass(dtype, np.ndarray): + output = np.ndarray.view(self, dtype) else: - output = ndarray.view(self, dtype) + output = np.ndarray.view(self, dtype) # OK, there's the change except TypeError: dtype = np.dtype(dtype) @@ -387,14 +379,14 @@ def view(self, dtype=None, type=None): output = self.__array__().view(dtype, basetype) output._update_from(self) else: - output = ndarray.view(self, dtype) + output = np.ndarray.view(self, dtype) output._fill_value = None else: - output = ndarray.view(self, dtype, type) + output = np.ndarray.view(self, dtype, type) # Update the mask, just like in MaskedArray.view - if (getattr(output, '_mask', nomask) is not nomask): + if (getattr(output, '_mask', ma.nomask) is not ma.nomask): mdtype = ma.make_mask_descr(output.dtype) - output._mask = self._mask.view(mdtype, ndarray) + output._mask = self._mask.view(mdtype, np.ndarray) output._mask.shape = output.shape return output @@ -432,8 +424,8 @@ def tolist(self, fill_value=None): """ if fill_value is not None: return self.filled(fill_value).tolist() - result = narray(self.filled().tolist(), dtype=object) - mask = narray(self._mask.tolist()) + result = np.array(self.filled().tolist(), dtype=object) + mask = np.array(self._mask.tolist()) result[mask] = None return result.tolist() @@ -468,8 +460,8 @@ def __setstate__(self, state): """ (ver, shp, typ, isf, raw, msk, flv) = state - ndarray.__setstate__(self, (shp, typ, isf, raw)) - mdtype = dtype([(k, np.bool) for (k, _) in self.dtype.descr]) + np.ndarray.__setstate__(self, (shp, typ, isf, raw)) + mdtype = np.dtype([(k, np.bool) for (k, _) in self.dtype.descr]) self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk)) self.fill_value = flv @@ -488,8 +480,8 @@ def _mrreconstruct(subtype, baseclass, baseshape, basetype,): Build a new MaskedArray from the information stored in a pickle. """ - _data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype) - _mask = ndarray.__new__(ndarray, baseshape, 'b1') + _data = np.ndarray.__new__(baseclass, baseshape, basetype).view(subtype) + _mask = np.ndarray.__new__(np.ndarray, baseshape, 'b1') return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,) mrecarray = MaskedRecords @@ -531,12 +523,12 @@ def fromarrays(arraylist, dtype=None, shape=None, formats=None, Lists of tuples should be preferred over lists of lists for faster processing. """ - datalist = [getdata(x) for x in arraylist] - masklist = [np.atleast_1d(getmaskarray(x)) for x in arraylist] - _array = recfromarrays(datalist, - dtype=dtype, shape=shape, formats=formats, - names=names, titles=titles, aligned=aligned, - byteorder=byteorder).view(mrecarray) + datalist = [ma.getdata(x) for x in arraylist] + masklist = [np.atleast_1d(ma.getmaskarray(x)) for x in arraylist] + _array = np.rec.fromarrays(datalist, + dtype=dtype, shape=shape, formats=formats, + names=names, titles=titles, aligned=aligned, + byteorder=byteorder).view(mrecarray) _array._mask.flat = list(zip(*masklist)) if fill_value is not None: _array.fill_value = fill_value @@ -545,7 +537,7 @@ def fromarrays(arraylist, dtype=None, shape=None, formats=None, def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None, titles=None, aligned=False, byteorder=None, - fill_value=None, mask=nomask): + fill_value=None, mask=ma.nomask): """ Creates a MaskedRecords from a list of records. 
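
A usage sketch for the `fromrecords` signature shown above; the optional `mask` follows the per-record field layout, and field access returns masked views:

    >>> import numpy as np
    >>> from numpy.ma import mrecords
    >>> rec = mrecords.fromrecords([(1, 10.5), (2, 20.0)],
    ...                            names='id,val', mask=[(0, 1), (0, 0)])
    >>> rec.id.tolist()
    [1, 2]
    >>> rec.val.tolist()   # masked fields come back as None
    [None, 20.0]
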
@@ -579,22 +571,22 @@ def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None, # Grab the initial _fieldmask, if needed: _mask = getattr(reclist, '_mask', None) # Get the list of records. - if isinstance(reclist, ndarray): + if isinstance(reclist, np.ndarray): # Make sure we don't have some hidden mask - if isinstance(reclist, MaskedArray): - reclist = reclist.filled().view(ndarray) + if isinstance(reclist, ma.MaskedArray): + reclist = reclist.filled().view(np.ndarray) # Grab the initial dtype, just in case if dtype is None: dtype = reclist.dtype reclist = reclist.tolist() - mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats, + mrec = np.rec.fromrecords(reclist, dtype=dtype, shape=shape, formats=formats, names=names, titles=titles, aligned=aligned, byteorder=byteorder).view(mrecarray) # Set the fill_value if needed if fill_value is not None: mrec.fill_value = fill_value # Now, let's deal w/ the mask - if mask is not nomask: + if mask is not ma.nomask: mask = np.asarray(mask) maskrecordlength = len(mask.dtype) if maskrecordlength: @@ -716,8 +708,8 @@ def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='', varnames = _varnames # Get the data. - _variables = masked_array([line.strip().split(delimiter) for line in ftext - if line[0] != commentchar and len(line) > 1]) + _variables = ma.masked_array([line.strip().split(delimiter) for line in ftext + if line[0] != commentchar and len(line) > 1]) (_, nfields) = _variables.shape ftext.close() @@ -733,13 +725,13 @@ def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='', vartypes = _guessvartypes(_variables[0]) # Construct the descriptor. - mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)] + mdescr = list(zip(varnames, vartypes)) mfillv = [ma.default_fill_value(f) for f in vartypes] # Get the data and the mask. # We just need a list of masked_arrays. It's easier to create it like that: _mask = (_variables.T == missingchar) - _datalist = [masked_array(a, mask=m, dtype=t, fill_value=f) + _datalist = [ma.masked_array(a, mask=m, dtype=t, fill_value=f) for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)] return fromarrays(_datalist, dtype=mdescr) @@ -761,7 +753,7 @@ def addfield(mrecord, newfield, newfieldname=None): # Get the new data. # Create a new empty recarray newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)]) - newdata = recarray(_data.shape, newdtype) + newdata = np.recarray(_data.shape, newdtype) # Add the existing field [newdata.setfield(_data.getfield(*f), *f) for f in _data.dtype.fields.values()] @@ -771,12 +763,12 @@ def addfield(mrecord, newfield, newfieldname=None): # Get the new mask # Create a new empty recarray newmdtype = np.dtype([(n, np.bool) for n in newdtype.names]) - newmask = recarray(_data.shape, newmdtype) + newmask = np.recarray(_data.shape, newmdtype) # Add the old masks [newmask.setfield(_mask.getfield(*f), *f) for f in _mask.dtype.fields.values()] # Add the mask of the new field - newmask.setfield(getmaskarray(newfield), + newmask.setfield(ma.getmaskarray(newfield), *newmask.dtype.fields[newfieldname]) newdata._mask = newmask return newdata diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi index 85714420cb64..7e2fdb1e92c6 100644 --- a/numpy/ma/mrecords.pyi +++ b/numpy/ma/mrecords.pyi @@ -1,9 +1,16 @@ from typing import Any, TypeVar from numpy import dtype -from numpy.ma import MaskedArray +from . 
import MaskedArray -__all__: list[str] +__all__ = [ + "MaskedRecords", + "mrecarray", + "fromarrays", + "fromrecords", + "fromtextfile", + "addfield", +] _ShapeType_co = TypeVar("_ShapeType_co", covariant=True, bound=tuple[int, ...]) _DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True) diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 970ae2875493..53651004db9a 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -23,7 +23,7 @@ import numpy._core.umath as umath from numpy.exceptions import AxisError from numpy.testing import ( - assert_raises, assert_warns, suppress_warnings, IS_WASM + assert_raises, assert_warns, suppress_warnings, IS_WASM, temppath ) from numpy.testing._private.utils import requires_memory from numpy import ndarray @@ -1015,6 +1015,14 @@ def test_object_with_array(self): mx[1].data[0] = 0. assert_(mx2[0] == 0.) + def test_maskedarray_tofile_raises_notimplementederror(self): + xm = masked_array([1, 2, 3], mask=[False, True, False]) + # Test case to check the NotImplementedError. + # It is not implemented at this point of time. We can change this in future + with temppath(suffix='.npy') as path: + with pytest.raises(NotImplementedError): + np.save(path, xm) + class TestMaskedArrayArithmetic: # Base test class for MaskedArrays. @@ -1178,6 +1186,10 @@ def test_basic_ufuncs(self): assert_equal(np.greater_equal(x, y), greater_equal(xm, ym)) assert_equal(np.conjugate(x), conjugate(xm)) + def test_basic_ufuncs_masked(self): + # Mostly regression test for gh-25635 + assert np.sqrt(np.ma.masked) is np.ma.masked + def test_count_func(self): # Tests count assert_equal(1, count(1)) @@ -1344,6 +1356,43 @@ def test_minmax_dtypes(self): assert masked_array([-cmax, 0], mask=[0, 1]).max() == -cmax assert masked_array([cmax, 0], mask=[0, 1]).min() == cmax + @pytest.mark.parametrize("dtype", "bBiIqQ") + @pytest.mark.parametrize("mask", [ + [False, False, False, True, True], # masked min/max + [False, False, False, True, False], # masked max only + [False, False, False, False, True], # masked min only + ]) + @pytest.mark.parametrize("axis", [None, -1]) + def test_minmax_ints(self, dtype, mask, axis): + iinfo = np.iinfo(dtype) + # two dimensional to hit certain filling paths + a = np.array([[0, 10, -10, iinfo.min, iinfo.max]] * 2).astype(dtype) + mask = np.asarray([mask] * 2) + + masked_a = masked_array(a, mask=mask) + assert_array_equal(masked_a.min(axis), a[~mask].min(axis)) + assert_array_equal(masked_a.max(axis), a[~mask].max(axis)) + + @pytest.mark.parametrize("time_type", ["M8[s]", "m8[s]"]) + def test_minmax_time_dtypes(self, time_type): + def minmax_with_mask(arr, mask): + masked_arr = masked_array(arr, mask=mask) + expected_min = arr[~np.array(mask, dtype=bool)].min() + expected_max = arr[~np.array(mask, dtype=bool)].max() + + assert_array_equal(masked_arr.min(), expected_min) + assert_array_equal(masked_arr.max(), expected_max) + + # Additional tests on max/min for time dtypes + x1 = np.array([1, 1, -2, 4, 5, -10, 10, 1, 2, -2**63+1], dtype=time_type) + x2 = np.array(['NaT', 1, -2, 4, 5, -10, 10, 1, 2, 3], dtype=time_type) + x3 = np.array(['NaT', 'NaT', -2, 4, 5, -10, 10, 1, 2, 3], dtype=time_type) + x_test = [x1, x2, x3] + m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0] + + for x in x_test: + minmax_with_mask(x, m) + def test_addsumprod(self): # Tests add, sum, product. 
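
The core invariant behind these parametrized cases: reductions skip masked entries even when they sit at the integer extremes that the filling logic in `numpy.ma.core` must avoid overflowing. In its simplest form:

    >>> import numpy as np
    >>> iinfo = np.iinfo(np.int64)
    >>> a = np.ma.masked_array([0, 10, -10, iinfo.min, iinfo.max],
    ...                        mask=[False, False, False, True, True])
    >>> int(a.min()), int(a.max())
    (-10, 10)
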
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d @@ -1868,20 +1917,20 @@ def test_eq_with_None(self): with suppress_warnings() as sup: sup.filter(FutureWarning, "Comparison to `None`") a = array([None, 1], mask=[0, 1]) - assert_equal(a == None, array([True, False], mask=[0, 1])) - assert_equal(a.data == None, [True, False]) - assert_equal(a != None, array([False, True], mask=[0, 1])) + assert_equal(a == None, array([True, False], mask=[0, 1])) # noqa: E711 + assert_equal(a.data == None, [True, False]) # noqa: E711 + assert_equal(a != None, array([False, True], mask=[0, 1])) # noqa: E711 # With nomask a = array([None, 1], mask=False) - assert_equal(a == None, [True, False]) - assert_equal(a != None, [False, True]) + assert_equal(a == None, [True, False]) # noqa: E711 + assert_equal(a != None, [False, True]) # noqa: E711 # With complete mask a = array([None, 2], mask=True) - assert_equal(a == None, array([False, True], mask=True)) - assert_equal(a != None, array([True, False], mask=True)) + assert_equal(a == None, array([False, True], mask=True)) # noqa: E711 + assert_equal(a != None, array([True, False], mask=True)) # noqa: E711 # Fully masked, even comparison to None should return "masked" a = masked - assert_equal(a == None, masked) + assert_equal(a == None, masked) # noqa: E711 def test_eq_with_scalar(self): a = array(1) @@ -3789,9 +3838,9 @@ def test_toflex(self): assert_equal(record['_mask'], data._mask) ndtype = [('i', int), ('s', '|S3'), ('f', float)] - data = array([(i, s, f) for (i, s, f) in zip(np.arange(10), - 'ABCDEFGHIJKLM', - np.random.rand(10))], + data = array(list(zip(np.arange(10), + 'ABCDEFGHIJKLM', + np.random.rand(10))), dtype=ndtype) data[[0, 1, 2, -1]] = masked record = data.toflex() @@ -3799,9 +3848,9 @@ def test_toflex(self): assert_equal(record['_mask'], data._mask) ndtype = np.dtype("int, (2,3)float, float") - data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10), - np.random.rand(10), - np.random.rand(10))], + data = array(list(zip(np.arange(10), + np.random.rand(10), + np.random.rand(10))), dtype=ndtype) data[[0, 1, 2, -1]] = masked record = data.toflex() @@ -4849,6 +4898,26 @@ def test_mask_or(self): cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype) assert_equal(mask_or(amask, bmask), cntrl) + a = np.array([False, False]) + assert mask_or(a, a) is nomask # gh-27360 + + def test_allequal(self): + x = array([1, 2, 3], mask=[0, 0, 0]) + y = array([1, 2, 3], mask=[1, 0, 0]) + z = array([[1, 2, 3], [4, 5, 6]], mask=[[0, 0, 0], [1, 1, 1]]) + + assert allequal(x, y) + assert not allequal(x, y, fill_value=False) + assert allequal(x, z) + + # test allequal for the same input, with mask=nomask, this test is for + # the scenario raised in https://github.com/numpy/numpy/issues/27201 + assert allequal(x, x) + assert allequal(x, x, fill_value=False) + + assert allequal(y, y) + assert not allequal(y, y, fill_value=False) + def test_flatten_mask(self): # Tests flatten mask # Standard dtype @@ -5435,7 +5504,8 @@ def test_coercion_bytes(self): def test_subclass(self): # https://github.com/astropy/astropy/issues/6645 - class Sub(type(np.ma.masked)): pass + class Sub(type(np.ma.masked)): + pass a = Sub() assert_(a is Sub()) @@ -5707,3 +5777,15 @@ def test_deepcopy_0d_obj(): deepcopy[...] 
= 17 assert_equal(source, 0) assert_equal(deepcopy, 17) + + +def test_uint_fill_value_and_filled(): + # See also gh-27269 + a = np.ma.MaskedArray([1, 1], [True, False], dtype="uint16") + # the fill value should likely not be 999999, but for now guarantee it: + assert a.fill_value == 999999 + # However, its type is uint: + assert a.fill_value.dtype.kind == "u" + # And this ensures things like filled work: + np.testing.assert_array_equal( + a.filled(), np.array([999999, 1]).astype("uint16"), strict=True) diff --git a/numpy/ma/tests/test_subclassing.py b/numpy/ma/tests/test_subclassing.py index a627245ffbb3..c454af09bb19 100644 --- a/numpy/ma/tests/test_subclassing.py +++ b/numpy/ma/tests/test_subclassing.py @@ -264,7 +264,7 @@ def test_subclasspreservation(self): # Checks that masked_array(...,subok=True) preserves the class. x = np.arange(5) m = [0, 0, 1, 0, 0] - xinfo = [(i, j) for (i, j) in zip(x, m)] + xinfo = list(zip(x, m)) xsub = MSubArray(x, mask=m, info={'xsub':xinfo}) # mxsub = masked_array(xsub, subok=False) diff --git a/numpy/ma/timer_comparison.py b/numpy/ma/timer_comparison.py index 9ae4c63c8e9a..9c157308fcbd 100644 --- a/numpy/ma/timer_comparison.py +++ b/numpy/ma/timer_comparison.py @@ -1,5 +1,5 @@ +import functools import timeit -from functools import reduce import numpy as np import numpy._core.fromnumeric as fromnumeric @@ -133,10 +133,10 @@ def test_1(self): xf = np.where(m1, 1.e+20, x) xm.set_fill_value(1.e+20) - assert((xm-ym).filled(0).any()) + assert (xm-ym).filled(0).any() s = x.shape - assert(xm.size == reduce(lambda x, y:x*y, s)) - assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) + assert xm.size == functools.reduce(lambda x, y: x*y, s) + assert self.count(xm) == len(m1) - functools.reduce(lambda x, y: x+y, m1) for s in [(4, 3), (6, 2)]: x.shape = s @@ -144,7 +144,7 @@ def test_1(self): xm.shape = s ym.shape = s xf.shape = s - assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) + assert self.count(xm) == len(m1) - functools.reduce(lambda x, y: x+y, m1) @np.errstate(all='ignore') def test_2(self): diff --git a/numpy/matlib.py b/numpy/matlib.py index 95f573ab7400..7ee194d56b41 100644 --- a/numpy/matlib.py +++ b/numpy/matlib.py @@ -207,8 +207,6 @@ def eye(n,M=None, k=0, dtype=float, order='C'): Whether the output should be stored in row-major (C-style) or column-major (Fortran-style) order in memory. - ..
versionadded:: 1.14.0 - Returns ------- I : matrix diff --git a/numpy/matlib.pyi b/numpy/matlib.pyi new file mode 100644 index 000000000000..67b753a87c32 --- /dev/null +++ b/numpy/matlib.pyi @@ -0,0 +1,578 @@ +from typing import Any, Literal, TypeAlias, TypeVar, overload + +import numpy as np +import numpy.typing as npt + +# ruff: noqa: F401 +from numpy import ( + False_, + ScalarType, + True_, + __array_namespace_info__, + __version__, + abs, + absolute, + acos, + acosh, + add, + all, + allclose, + amax, + amin, + angle, + any, + append, + apply_along_axis, + apply_over_axes, + arange, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + argmax, + argmin, + argpartition, + argsort, + argwhere, + around, + array, + array2string, + array_equal, + array_equiv, + array_repr, + array_split, + array_str, + asanyarray, + asarray, + asarray_chkfinite, + ascontiguousarray, + asfortranarray, + asin, + asinh, + asmatrix, + astype, + atan, + atan2, + atanh, + atleast_1d, + atleast_2d, + atleast_3d, + average, + bartlett, + base_repr, + binary_repr, + bincount, + bitwise_and, + bitwise_count, + bitwise_invert, + bitwise_left_shift, + bitwise_not, + bitwise_right_shift, + bitwise_xor, + blackman, + block, + bmat, + bool, + bool_, + broadcast, + broadcast_arrays, + broadcast_shapes, + broadcast_to, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + c_, + can_cast, + cbrt, + ceil, + char, + character, + choose, + clip, + clongdouble, + column_stack, + common_type, + complex64, + complex128, + complex256, + complexfloating, + compress, + concat, + concatenate, + conj, + conjugate, + convolve, + copy, + copysign, + copyto, + core, + corrcoef, + correlate, + cos, + cosh, + count_nonzero, + cov, + cross, + csingle, + ctypeslib, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + datetime64, + datetime_as_string, + datetime_data, + deg2rad, + degrees, + delete, + diag, + diag_indices, + diag_indices_from, + diagflat, + diagonal, + diff, + digitize, + divide, + divmod, + dot, + double, + dsplit, + dstack, + dtype, + dtypes, + e, + ediff1d, + einsum, + einsum_path, + emath, + empty_like, + equal, + errstate, + euler_gamma, + exceptions, + exp, + exp2, + expand_dims, + expm1, + extract, + fabs, + fft, + fill_diagonal, + finfo, + fix, + flatiter, + flatnonzero, + flexible, + flip, + fliplr, + flipud, + float16, + float32, + float64, + float128, + float_power, + floating, + floor, + floor_divide, + fmax, + fmin, + fmod, + format_float_positional, + format_float_scientific, + frexp, + from_dlpack, + frombuffer, + fromfile, + fromfunction, + fromiter, + frompyfunc, + fromregex, + fromstring, + full, + full_like, + gcd, + generic, + genfromtxt, + geomspace, + get_include, + get_printoptions, + getbufsize, + geterr, + geterrcall, + gradient, + greater, + greater_equal, + half, + hamming, + heaviside, + histogram, + histogram2d, + histogram_bin_edges, + histogramdd, + hsplit, + hstack, + hypot, + i0, + iinfo, + imag, + index_exp, + indices, + inexact, + inf, + info, + inner, + insert, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + interp, + intersect1d, + intp, + invert, + is_busday, + isclose, + iscomplex, + iscomplexobj, + isdtype, + isfinite, + isfortran, + isin, + isinf, + isnan, + isnat, + isneginf, + isposinf, + isreal, + isrealobj, + isscalar, + issubdtype, + iterable, + ix_, + kaiser, + kron, + lcm, + ldexp, + left_shift, + less, + less_equal, + lexsort, + lib, + linalg, + linspace, + little_endian, + load, + loadtxt, + log, + log1p, + log2, + 
log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + logspace, + long, + longdouble, + longlong, + ma, + mask_indices, + matmul, + matrix, + matrix_transpose, + matvec, + max, + maximum, + may_share_memory, + mean, + median, + memmap, + meshgrid, + mgrid, + min, + min_scalar_type, + minimum, + mintypecode, + mod, + modf, + moveaxis, + multiply, + nan, + nan_to_num, + nanargmax, + nanargmin, + nancumprod, + nancumsum, + nanmax, + nanmean, + nanmedian, + nanmin, + nanpercentile, + nanprod, + nanquantile, + nanstd, + nansum, + nanvar, + ndarray, + ndenumerate, + ndim, + ndindex, + nditer, + negative, + nested_iters, + newaxis, + nextafter, + nonzero, + not_equal, + number, + object_, + ogrid, + ones_like, + outer, + packbits, + pad, + partition, + percentile, + permute_dims, + pi, + piecewise, + place, + poly, + poly1d, + polyadd, + polyder, + polydiv, + polyfit, + polyint, + polymul, + polynomial, + polysub, + polyval, + positive, + pow, + power, + printoptions, + prod, + promote_types, + ptp, + put, + put_along_axis, + putmask, + quantile, + r_, + rad2deg, + radians, + random, + ravel, + ravel_multi_index, + real, + real_if_close, + rec, + recarray, + reciprocal, + record, + remainder, + repeat, + require, + reshape, + resize, + result_type, + right_shift, + rint, + roll, + rollaxis, + roots, + rot90, + round, + # row_stack, + s_, + save, + savetxt, + savez, + savez_compressed, + sctypeDict, + searchsorted, + select, + set_printoptions, + setbufsize, + setdiff1d, + seterr, + seterrcall, + setxor1d, + shape, + shares_memory, + short, + show_config, + show_runtime, + sign, + signbit, + signedinteger, + sin, + sinc, + single, + sinh, + size, + sort, + sort_complex, + spacing, + sqrt, + square, + squeeze, + std, + str_, + strings, + subtract, + sum, + swapaxes, + take, + take_along_axis, + tan, + tanh, + tensordot, + test, + testing, + tile, + timedelta64, + trace, + transpose, + trapezoid, + trapz, + tri, + tril, + tril_indices, + tril_indices_from, + trim_zeros, + triu, + triu_indices, + triu_indices_from, + true_divide, + trunc, + typecodes, + typename, + ubyte, + ufunc, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + union1d, + unique, + unique_all, + unique_counts, + unique_inverse, + unique_values, + unpackbits, + unravel_index, + unsignedinteger, + unstack, + unwrap, + ushort, + vander, + var, + vdot, + vecdot, + vecmat, + vectorize, + void, + vsplit, + vstack, + where, + zeros_like, +) +from numpy._typing import _ArrayLike, _DTypeLike + +__all__ = ["rand", "randn", "repmat"] +__all__ += np.__all__ + +### + +_T = TypeVar("_T", bound=np.generic) +_Matrix: TypeAlias = np.matrix[tuple[int, int], np.dtype[_T]] +_Order: TypeAlias = Literal["C", "F"] + +### + +# ruff: noqa: F811 + +# +@overload +def empty(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... +@overload +def empty(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +@overload +def empty(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def ones(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... +@overload +def ones(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +@overload +def ones(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... 
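The `empty` and `ones` stubs above (and `zeros` just below) share a three-overload pattern: omitting `dtype` resolves to the `float64` overload, a concrete scalar type such as `np.int32` resolves to the generic `_T` overload, and an arbitrary `DTypeLike` (e.g. a string) falls through to `Any`. A minimal usage sketch, illustrative only and not part of the patch:

    import numpy as np
    import numpy.matlib as matlib

    m1 = matlib.ones((2, 3))                  # 1st overload: matrix of float64
    m2 = matlib.ones((2, 3), dtype=np.int32)  # 2nd overload: matrix of int32
    m3 = matlib.ones((2, 3), dtype="int32")   # 3rd overload: matrix of Any
    # at runtime all three are plain np.matrix instances
    assert m1.dtype == np.float64
    assert m2.dtype == m3.dtype == np.int32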
+ +# +@overload +def zeros(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... +@overload +def zeros(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +@overload +def zeros(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def identity(n: int, dtype: None = None) -> _Matrix[np.float64]: ... +@overload +def identity(n: int, dtype: _DTypeLike[_T]) -> _Matrix[_T]: ... +@overload +def identity(n: int, dtype: npt.DTypeLike | None = None) -> _Matrix[Any]: ... + +# +@overload +def eye( + n: int, + M: int | None = None, + k: int = 0, + dtype: type[np.float64] | None = ..., + order: _Order = "C", +) -> _Matrix[np.float64]: ... +@overload +def eye(n: int, M: int | None, k: int, dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +@overload +def eye(n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +@overload +def eye(n: int, M: int | None = None, k: int = 0, dtype: npt.DTypeLike = ..., order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def rand(arg: int | tuple[()] | tuple[int] | tuple[int, int], /) -> _Matrix[np.float64]: ... +@overload +def rand(arg: int, /, *args: int) -> _Matrix[np.float64]: ... + +# +@overload +def randn(arg: int | tuple[()] | tuple[int] | tuple[int, int], /) -> _Matrix[np.float64]: ... +@overload +def randn(arg: int, /, *args: int) -> _Matrix[np.float64]: ... + +# +@overload +def repmat(a: _Matrix[_T], m: int, n: int) -> _Matrix[_T]: ... +@overload +def repmat(a: _ArrayLike[_T], m: int, n: int) -> npt.NDArray[_T]: ... +@overload +def repmat(a: npt.ArrayLike, m: int, n: int) -> npt.NDArray[Any]: ... diff --git a/numpy/matrixlib/__init__.pyi b/numpy/matrixlib/__init__.pyi index a7efab5844af..e8ec8b248866 100644 --- a/numpy/matrixlib/__init__.pyi +++ b/numpy/matrixlib/__init__.pyi @@ -1,14 +1,4 @@ -from numpy._pytesttester import PytestTester +from numpy import matrix +from .defmatrix import bmat, asmatrix -from numpy import ( - matrix as matrix, -) - -from numpy.matrixlib.defmatrix import ( - bmat as bmat, - mat as mat, - asmatrix as asmatrix, -) - -__all__: list[str] -test: PytestTester +__all__ = ["matrix", "bmat", "asmatrix"] diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py index 99c07fcf8f87..6512a0246db6 100644 --- a/numpy/matrixlib/defmatrix.py +++ b/numpy/matrixlib/defmatrix.py @@ -137,8 +137,10 @@ def __new__(subtype, data, dtype=None, copy=True): new = data.view(subtype) if intype != data.dtype: return new.astype(intype) - if copy: return new.copy() - else: return new + if copy: + return new.copy() + else: + return new if isinstance(data, str): data = _convert_from_string(data) @@ -169,7 +171,8 @@ def __new__(subtype, data, dtype=None, copy=True): def __array_finalize__(self, obj): self._getitem = False - if (isinstance(obj, matrix) and obj._getitem): return + if (isinstance(obj, matrix) and obj._getitem): + return ndim = self.ndim if (ndim == 2): return diff --git a/numpy/matrixlib/defmatrix.pyi b/numpy/matrixlib/defmatrix.pyi index 9d0d1ee50b66..03476555e59e 100644 --- a/numpy/matrixlib/defmatrix.pyi +++ b/numpy/matrixlib/defmatrix.pyi @@ -1,16 +1,17 @@ from collections.abc import Sequence, Mapping from typing import Any -from numpy import matrix as matrix + +from numpy import matrix from numpy._typing import ArrayLike, DTypeLike, NDArray -__all__: list[str] +__all__ = ["matrix", "bmat", "asmatrix"] def bmat( obj: str 
| Sequence[ArrayLike] | NDArray[Any], ldict: None | Mapping[str, Any] = ..., gdict: None | Mapping[str, Any] = ..., -) -> matrix[Any, Any]: ... +) -> matrix[tuple[int, int], Any]: ... -def asmatrix(data: ArrayLike, dtype: DTypeLike = ...) -> matrix[Any, Any]: ... +def asmatrix(data: ArrayLike, dtype: DTypeLike = ...) -> matrix[tuple[int, int], Any]: ... mat = asmatrix diff --git a/numpy/meson.build b/numpy/meson.build index 84dffaa3d880..6fef05b9113f 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -223,6 +223,7 @@ null_dep = dependency('', required : false) atomic_dep = null_dep code_non_lockfree = ''' #include <stdint.h> + #include <stddef.h> int main() { struct { void *p; @@ -230,10 +231,10 @@ code_non_lockfree = ''' } x; x.p = NULL; x.u8v = 0; - uint8_t res = __atomic_load_n(x.u8v, __ATOMIC_SEQ_CST); - __atomic_store_n(x.u8v, 1, ATOMIC_SEQ_CST); - void *p = __atomic_load_n(x.p, __ATOMIC_SEQ_CST); - __atomic_store_n((void **)x.p, NULL, __ATOMIC_SEQ_CST) + uint8_t res = __atomic_load_n(&x.u8v, __ATOMIC_SEQ_CST); + __atomic_store_n(&x.u8v, 1, __ATOMIC_SEQ_CST); + void *p = __atomic_load_n((void **)x.p, __ATOMIC_SEQ_CST); + __atomic_store_n((void **)x.p, NULL, __ATOMIC_SEQ_CST); return 0; } ''' @@ -271,14 +272,19 @@ python_sources = [ '__init__.pxd', '__init__.py', '__init__.pyi', + '__config__.pyi', '_array_api_info.py', '_array_api_info.pyi', '_configtool.py', + '_configtool.pyi', '_distributor_init.py', + '_distributor_init.pyi', '_globals.py', + '_globals.pyi', '_pytesttester.py', '_pytesttester.pyi', '_expired_attrs_2_0.py', + '_expired_attrs_2_0.pyi', 'conftest.py', 'ctypeslib.py', 'ctypeslib.pyi', @@ -287,6 +293,7 @@ python_sources = [ 'dtypes.py', 'dtypes.pyi', 'matlib.py', + 'matlib.pyi', 'py.typed', 'version.pyi', ] diff --git a/numpy/polynomial/__init__.pyi b/numpy/polynomial/__init__.pyi index d36605b89250..c5dccfe16dee 100644 --- a/numpy/polynomial/__init__.pyi +++ b/numpy/polynomial/__init__.pyi @@ -6,6 +6,7 @@ from .legendre import Legendre from .hermite import Hermite from .hermite_e import HermiteE from .laguerre import Laguerre +from . import polynomial, chebyshev, legendre, hermite, hermite_e, laguerre __all__ = [ "set_default_printstyle", diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py index 9c345553eedd..1c3d16c6efd7 100644 --- a/numpy/polynomial/_polybase.py +++ b/numpy/polynomial/_polybase.py @@ -23,8 +23,6 @@ class ABCPolyBase(abc.ABC): '+', '-', '*', '//', '%', 'divmod', '**', and '()' along with the methods listed below. - .. versionadded:: 1.9.0 - Parameters ---------- coef : array_like @@ -39,7 +37,7 @@ Window, see domain for its use. The default value is the derived class window. symbol : str, optional - Symbol used to represent the independent variable in string + Symbol used to represent the independent variable in string representations of the polynomial expression, e.g. for printing. The symbol must be a valid Python identifier. Default value is 'x'. @@ -190,8 +188,6 @@ def _fromroots(r): def has_samecoef(self, other): """Check if coefficients match. - .. versionadded:: 1.6.0 - Parameters ---------- other : class instance @@ -213,8 +209,6 @@ def has_samedomain(self, other): """Check if domains match. - .. versionadded:: 1.6.0 - Parameters ---------- other : class instance @@ -231,8 +225,6 @@ def has_samewindow(self, other): """Check if windows match. - ..
versionadded:: 1.6.0 - Parameters ---------- other : class instance @@ -249,8 +241,6 @@ def has_samewindow(self, other): def has_sametype(self, other): """Check if types match. - .. versionadded:: 1.7.0 - Parameters ---------- other : object @@ -271,8 +261,6 @@ def _get_coefficients(self, other): class as self with identical domain and window. If so, return its coefficients, otherwise return `other`. - .. versionadded:: 1.9.0 - Parameters ---------- other : anything @@ -464,7 +452,7 @@ def _format_term(self, scalar_format: Callable, off: float, scale: float): ) needs_parens = True return term, needs_parens - + def _repr_latex_(self): # get the scaled argument string to the basis functions off, scale = self.mapparms() @@ -689,8 +677,6 @@ def copy(self): def degree(self): """The degree of the series. - .. versionadded:: 1.5.0 - Returns ------- degree : int @@ -730,8 +716,6 @@ def cutdeg(self, deg): squares where the coefficients of the high degree terms may be very small. - .. versionadded:: 1.5.0 - Parameters ---------- deg : non-negative int @@ -942,8 +926,6 @@ def linspace(self, n=100, domain=None): default the domain is the same as that of the series instance. This method is intended mostly as a plotting aid. - .. versionadded:: 1.5.0 - Parameters ---------- n : int, optional @@ -1010,13 +992,9 @@ class domain in NumPy 1.4 and ``None`` in later versions. chosen so that the errors of the products ``w[i]*y[i]`` all have the same variance. When using inverse-variance weighting, use ``w[i] = 1/sigma(y[i])``. The default value is None. - - .. versionadded:: 1.5.0 window : {[beg, end]}, optional Window to use for the returned series. The default value is the default class domain - - .. versionadded:: 1.6.0 symbol : str, optional Symbol representing the independent variable. Default is 'x'. @@ -1041,6 +1019,9 @@ class domain in NumPy 1.4 and ``None`` in later versions. """ if domain is None: domain = pu.getdomain(x) + if domain[0] == domain[1]: + domain[0] -= 1 + domain[1] += 1 elif type(domain) is list and len(domain) == 0: domain = cls.domain @@ -1142,8 +1123,6 @@ def basis(cls, deg, domain=None, window=None, symbol='x'): Returns the series representing the basis polynomial of degree `deg`. - .. versionadded:: 1.7.0 - Parameters ---------- deg : int @@ -1186,8 +1165,6 @@ def cast(cls, series, domain=None, window=None): module, but could be some other class that supports the convert method. - .. 
versionadded:: 1.7.0 - Parameters ---------- series : series diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi index 7519a755f528..ca7ca628d514 100644 --- a/numpy/polynomial/_polybase.pyi +++ b/numpy/polynomial/_polybase.pyi @@ -1,10 +1,8 @@ import abc import decimal import numbers -import sys from collections.abc import Iterator, Mapping, Sequence from typing import ( - TYPE_CHECKING, Any, ClassVar, Final, @@ -13,7 +11,6 @@ from typing import ( SupportsIndex, TypeAlias, TypeGuard, - TypeVar, overload, ) @@ -44,20 +41,15 @@ from ._polytypes import ( _ArrayLikeCoef_co, ) -if sys.version_info >= (3, 11): - from typing import LiteralString -elif TYPE_CHECKING: - from typing_extensions import LiteralString -else: - LiteralString: TypeAlias = str +from typing_extensions import LiteralString, TypeVar __all__: Final[Sequence[str]] = ("ABCPolyBase",) -_NameCo = TypeVar("_NameCo", bound=None | LiteralString, covariant=True) -_Self = TypeVar("_Self", bound="ABCPolyBase") -_Other = TypeVar("_Other", bound="ABCPolyBase") +_NameCo = TypeVar("_NameCo", bound=LiteralString | None, covariant=True, default=LiteralString | None) +_Self = TypeVar("_Self") +_Other = TypeVar("_Other", bound=ABCPolyBase) _AnyOther: TypeAlias = ABCPolyBase | _CoefLike_co | _SeriesLikeCoef_co _Hundred: TypeAlias = Literal[100] @@ -125,8 +117,6 @@ class ABCPolyBase(Generic[_NameCo], metaclass=abc.ABCMeta): arg: _ArrayLikeCoefObject_co, ) -> npt.NDArray[np.object_]: ... - def __str__(self, /) -> str: ... - def __repr__(self, /) -> str: ... def __format__(self, fmt_str: str, /) -> str: ... def __eq__(self, x: object, /) -> bool: ... def __ne__(self, x: object, /) -> bool: ... @@ -188,7 +178,7 @@ class ABCPolyBase(Generic[_NameCo], metaclass=abc.ABCMeta): self: _Self, /, domain: None | _SeriesLikeCoef_co = ..., - kind: type[_Self] = ..., + kind: None | type[_Self] = ..., window: None | _SeriesLikeCoef_co = ..., ) -> _Self: ... @@ -290,7 +280,7 @@ class ABCPolyBase(Generic[_NameCo], metaclass=abc.ABCMeta): ) -> _Self: ... @classmethod - def _str_term_unicode(cls, i: str, arg_str: str) -> str: ... + def _str_term_unicode(cls, /, i: str, arg_str: str) -> str: ... @staticmethod def _str_term_ascii(i: str, arg_str: str) -> str: ... 
@staticmethod diff --git a/numpy/polynomial/_polytypes.pyi b/numpy/polynomial/_polytypes.pyi index 54771c0581e4..b0794eb61831 100644 --- a/numpy/polynomial/_polytypes.pyi +++ b/numpy/polynomial/_polytypes.pyi @@ -1,7 +1,5 @@ -import sys from collections.abc import Callable, Sequence from typing import ( - TYPE_CHECKING, Any, Literal, NoReturn, @@ -9,9 +7,8 @@ from typing import ( SupportsIndex, SupportsInt, TypeAlias, - TypeVar, - final, overload, + type_check_only, ) import numpy as np @@ -23,6 +20,7 @@ from numpy._typing import ( _ArrayLikeNumber_co, _ArrayLikeObject_co, _NestedSequence, + _SupportsArray, # scalar-likes _IntLike_co, @@ -31,37 +29,17 @@ from numpy._typing import ( _NumberLike_co, ) -if sys.version_info >= (3, 11): - from typing import LiteralString -elif TYPE_CHECKING: - from typing_extensions import LiteralString -else: - LiteralString: TypeAlias = str +from typing_extensions import LiteralString, TypeVar + _T = TypeVar("_T") _T_contra = TypeVar("_T_contra", contravariant=True) - -_Tuple2: TypeAlias = tuple[_T, _T] - -_V = TypeVar("_V") -_V_co = TypeVar("_V_co", covariant=True) -_Self = TypeVar("_Self", bound=object) - +_Self = TypeVar("_Self") _SCT = TypeVar("_SCT", bound=np.number[Any] | np.bool | np.object_) -_SCT_co = TypeVar( - "_SCT_co", - bound=np.number[Any] | np.bool | np.object_, - covariant=True, -) - -@final -class _SupportsArray(Protocol[_SCT_co]): - def __array__(self ,) -> npt.NDArray[_SCT_co]: ... -@final +# compatible with e.g. int, float, complex, Decimal, Fraction, and ABCPolyBase +@type_check_only class _SupportsCoefOps(Protocol[_T_contra]): - # compatible with e.g. `int`, `float`, `complex`, `Decimal`, `Fraction`, - # and `ABCPolyBase` def __eq__(self, x: object, /) -> bool: ... def __ne__(self, x: object, /) -> bool: ... @@ -71,19 +49,16 @@ class _SupportsCoefOps(Protocol[_T_contra]): def __add__(self: _Self, x: _T_contra, /) -> _Self: ... def __sub__(self: _Self, x: _T_contra, /) -> _Self: ... def __mul__(self: _Self, x: _T_contra, /) -> _Self: ... - def __truediv__(self: _Self, x: _T_contra, /) -> _Self | float: ... def __pow__(self: _Self, x: _T_contra, /) -> _Self | float: ... def __radd__(self: _Self, x: _T_contra, /) -> _Self: ... def __rsub__(self: _Self, x: _T_contra, /) -> _Self: ... def __rmul__(self: _Self, x: _T_contra, /) -> _Self: ... - def __rtruediv__(self: _Self, x: _T_contra, /) -> _Self | float: ... _Series: TypeAlias = np.ndarray[tuple[int], np.dtype[_SCT]] _FloatSeries: TypeAlias = _Series[np.floating[Any]] _ComplexSeries: TypeAlias = _Series[np.complexfloating[Any, Any]] -_NumberSeries: TypeAlias = _Series[np.number[Any]] _ObjectSeries: TypeAlias = _Series[np.object_] _CoefSeries: TypeAlias = _Series[np.inexact[Any] | np.object_] @@ -92,38 +67,38 @@ _ComplexArray: TypeAlias = npt.NDArray[np.complexfloating[Any, Any]] _ObjectArray: TypeAlias = npt.NDArray[np.object_] _CoefArray: TypeAlias = npt.NDArray[np.inexact[Any] | np.object_] +_Tuple2: TypeAlias = tuple[_T, _T] _Array1: TypeAlias = np.ndarray[tuple[Literal[1]], np.dtype[_SCT]] _Array2: TypeAlias = np.ndarray[tuple[Literal[2]], np.dtype[_SCT]] _AnyInt: TypeAlias = SupportsInt | SupportsIndex -_CoefObjectLike_co: TypeAlias = np.object_ | _SupportsCoefOps +_CoefObjectLike_co: TypeAlias = np.object_ | _SupportsCoefOps[Any] _CoefLike_co: TypeAlias = _NumberLike_co | _CoefObjectLike_co # The term "series" is used here to refer to 1-d arrays of numeric scalars. 
_SeriesLikeBool_co: TypeAlias = ( - _SupportsArray[np.bool] + _SupportsArray[np.dtype[np.bool]] | Sequence[bool | np.bool] ) _SeriesLikeInt_co: TypeAlias = ( - _SupportsArray[np.integer[Any] | np.bool] + _SupportsArray[np.dtype[np.integer[Any] | np.bool]] | Sequence[_IntLike_co] ) _SeriesLikeFloat_co: TypeAlias = ( - _SupportsArray[np.floating[Any] | np.integer[Any] | np.bool] + _SupportsArray[np.dtype[np.floating[Any] | np.integer[Any] | np.bool]] | Sequence[_FloatLike_co] ) _SeriesLikeComplex_co: TypeAlias = ( - _SupportsArray[np.integer[Any] | np.inexact[Any] | np.bool] + _SupportsArray[np.dtype[np.inexact[Any] | np.integer[Any] | np.bool]] | Sequence[_ComplexLike_co] ) _SeriesLikeObject_co: TypeAlias = ( - _SupportsArray[np.object_] + _SupportsArray[np.dtype[np.object_]] | Sequence[_CoefObjectLike_co] ) _SeriesLikeCoef_co: TypeAlias = ( - # npt.NDArray[np.number[Any] | np.bool | np.object_] - _SupportsArray[np.number[Any] | np.bool | np.object_] + _SupportsArray[np.dtype[np.number[Any] | np.bool | np.object_]] | Sequence[_CoefLike_co] ) @@ -138,15 +113,16 @@ _ArrayLikeCoef_co: TypeAlias = ( | _ArrayLikeCoefObject_co ) -_Name_co = TypeVar("_Name_co", bound=LiteralString, covariant=True) +_Name_co = TypeVar("_Name_co", bound=LiteralString, covariant=True, default=LiteralString) +@type_check_only class _Named(Protocol[_Name_co]): @property def __name__(self, /) -> _Name_co: ... _Line: TypeAlias = np.ndarray[tuple[Literal[1, 2]], np.dtype[_SCT]] -@final +@type_check_only class _FuncLine(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__(self, /, off: _SCT, scl: _SCT) -> _Line[_SCT]: ... @@ -165,11 +141,11 @@ class _FuncLine(_Named[_Name_co], Protocol[_Name_co]): def __call__( self, /, - off: _SupportsCoefOps, - scl: _SupportsCoefOps, + off: _SupportsCoefOps[Any], + scl: _SupportsCoefOps[Any], ) -> _Line[np.object_]: ... -@final +@type_check_only class _FuncFromRoots(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__(self, /, roots: _SeriesLikeFloat_co) -> _FloatSeries: ... @@ -178,7 +154,7 @@ class _FuncFromRoots(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__(self, /, roots: _SeriesLikeCoef_co) -> _ObjectSeries: ... -@final +@type_check_only class _FuncBinOp(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( @@ -209,7 +185,7 @@ class _FuncBinOp(_Named[_Name_co], Protocol[_Name_co]): c2: _SeriesLikeCoef_co, ) -> _ObjectSeries: ... -@final +@type_check_only class _FuncUnOp(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__(self, /, c: _SeriesLikeFloat_co) -> _FloatSeries: ... @@ -218,7 +194,7 @@ class _FuncUnOp(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__(self, /, c: _SeriesLikeCoef_co) -> _ObjectSeries: ... -@final +@type_check_only class _FuncPoly2Ortho(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__(self, /, pol: _SeriesLikeFloat_co) -> _FloatSeries: ... @@ -227,7 +203,7 @@ class _FuncPoly2Ortho(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__(self, /, pol: _SeriesLikeCoef_co) -> _ObjectSeries: ... -@final +@type_check_only class _FuncPow(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( @@ -254,7 +230,7 @@ class _FuncPow(_Named[_Name_co], Protocol[_Name_co]): maxpower: None | _IntLike_co = ..., ) -> _ObjectSeries: ... -@final +@type_check_only class _FuncDer(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( @@ -284,7 +260,7 @@ class _FuncDer(_Named[_Name_co], Protocol[_Name_co]): axis: SupportsIndex = ..., ) -> _ObjectArray: ... 
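The series-like aliases above now reuse `numpy._typing._SupportsArray`, which is parameterized by the dtype (e.g. `np.dtype[np.bool]`) rather than the bare scalar type, replacing the local `_SupportsArray` protocol this patch deletes. A hedged sketch of what the alias admits, namely any object whose `__array__` returns an array of a matching dtype:

    import numpy as np
    from numpy.polynomial import polynomial as P

    class Coefs:
        # duck-typed 1-d coefficient container; satisfies the
        # _SupportsArray[np.dtype[np.float64]] arm of _SeriesLikeFloat_co
        def __array__(self, dtype=None, copy=None):
            return np.array([1.0, -2.0, 0.5])

    # coefficients are coerced via np.array(...), so the duck array works;
    # this evaluates 1 - 2x + 0.5x**2 at x = 0.5:
    print(P.polyval(0.5, Coefs()))  # 0.125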
-@final +@type_check_only class _FuncInteg(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( @@ -314,13 +290,13 @@ class _FuncInteg(_Named[_Name_co], Protocol[_Name_co]): /, c: _ArrayLikeCoef_co, m: SupportsIndex = ..., - k: _SeriesLikeCoef_co | _SeriesLikeCoef_co = ..., + k: _CoefLike_co | _SeriesLikeCoef_co = ..., lbnd: _CoefLike_co = ..., scl: _CoefLike_co = ..., axis: SupportsIndex = ..., ) -> _ObjectArray: ... -@final +@type_check_only class _FuncValFromRoots(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( @@ -369,9 +345,9 @@ class _FuncValFromRoots(_Named[_Name_co], Protocol[_Name_co]): x: _CoefLike_co, r: _CoefLike_co, tensor: bool = ..., - ) -> _SupportsCoefOps: ... + ) -> _SupportsCoefOps[Any]: ... -@final +@type_check_only class _FuncVal(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( @@ -420,9 +396,9 @@ class _FuncVal(_Named[_Name_co], Protocol[_Name_co]): x: _CoefLike_co, c: _SeriesLikeObject_co, tensor: bool = ..., - ) -> _SupportsCoefOps: ... + ) -> _SupportsCoefOps[Any]: ... -@final +@type_check_only class _FuncVal2D(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( @@ -471,9 +447,9 @@ class _FuncVal2D(_Named[_Name_co], Protocol[_Name_co]): x: _CoefLike_co, y: _CoefLike_co, c: _SeriesLikeCoef_co, - ) -> _SupportsCoefOps: ... + ) -> _SupportsCoefOps[Any]: ... -@final +@type_check_only class _FuncVal3D(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( @@ -528,14 +504,14 @@ class _FuncVal3D(_Named[_Name_co], Protocol[_Name_co]): y: _CoefLike_co, z: _CoefLike_co, c: _SeriesLikeCoef_co, - ) -> _SupportsCoefOps: ... + ) -> _SupportsCoefOps[Any]: ... _AnyValF: TypeAlias = Callable[ [npt.ArrayLike, npt.ArrayLike, bool], _CoefArray, ] -@final +@type_check_only class _FuncValND(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( @@ -573,20 +549,20 @@ class _FuncValND(_Named[_Name_co], Protocol[_Name_co]): def __call__( self, val_f: _AnyValF, - c: _ArrayLikeCoef_co, + c: _SeriesLikeObject_co, /, - *args: _ArrayLikeCoef_co, - ) -> _ObjectArray: ... + *args: _CoefObjectLike_co, + ) -> _SupportsCoefOps[Any]: ... @overload def __call__( self, val_f: _AnyValF, - c: _SeriesLikeObject_co, + c: _ArrayLikeCoef_co, /, - *args: _CoefObjectLike_co, - ) -> _SupportsCoefOps: ... + *args: _ArrayLikeCoef_co, + ) -> _ObjectArray: ... -@final +@type_check_only class _FuncVander(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( @@ -619,7 +595,7 @@ class _FuncVander(_Named[_Name_co], Protocol[_Name_co]): _AnyDegrees: TypeAlias = Sequence[SupportsIndex] -@final +@type_check_only class _FuncVander2D(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( @@ -654,7 +630,7 @@ class _FuncVander2D(_Named[_Name_co], Protocol[_Name_co]): deg: _AnyDegrees, ) -> _CoefArray: ... -@final +@type_check_only class _FuncVander3D(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( @@ -699,7 +675,7 @@ _AnyFuncVander: TypeAlias = Callable[ _CoefArray, ] -@final +@type_check_only class _FuncVanderND(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( @@ -738,7 +714,7 @@ class _FuncVanderND(_Named[_Name_co], Protocol[_Name_co]): _FullFitResult: TypeAlias = Sequence[np.inexact[Any] | np.int32] -@final +@type_check_only class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( @@ -845,7 +821,7 @@ class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): w: None | _SeriesLikeFloat_co = ..., ) -> tuple[_ObjectArray, _FullFitResult]: ... 
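`_FuncFit` above encodes the two return shapes of the `*fit` functions: coefficients only by default, or a `(coef, diagnostics)` pair matching `_FullFitResult` when `full=True`. A small sketch using `chebfit`, illustrative only and not part of the patch:

    import numpy as np
    from numpy.polynomial import chebyshev as C

    x = np.linspace(-1, 1, 9)
    y = x**2                      # equals (T_0(x) + T_2(x)) / 2 on [-1, 1]
    coef = C.chebfit(x, y, deg=2)
    coef_full, (resid, rank, sv, rcond) = C.chebfit(x, y, deg=2, full=True)
    np.testing.assert_allclose(coef, [0.5, 0.0, 0.5], atol=1e-12)
    assert rank == 3              # the pseudo-Vandermonde matrix has full rank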
-@final +@type_check_only class _FuncRoots(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( @@ -865,7 +841,7 @@ class _FuncRoots(_Named[_Name_co], Protocol[_Name_co]): _Companion: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_SCT]] -@final +@type_check_only class _FuncCompanion(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( @@ -882,7 +858,7 @@ class _FuncCompanion(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__(self, /, c: _SeriesLikeCoef_co) -> _Companion[np.object_]: ... -@final +@type_check_only class _FuncGauss(_Named[_Name_co], Protocol[_Name_co]): def __call__( self, @@ -890,7 +866,7 @@ class _FuncGauss(_Named[_Name_co], Protocol[_Name_co]): deg: SupportsIndex, ) -> _Tuple2[_Series[np.float64]]: ... -@final +@type_check_only class _FuncWeight(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__( @@ -907,6 +883,6 @@ class _FuncWeight(_Named[_Name_co], Protocol[_Name_co]): @overload def __call__(self, /, c: _ArrayLikeCoef_co) -> _ObjectArray: ... -@final +@type_check_only class _FuncPts(_Named[_Name_co], Protocol[_Name_co]): def __call__(self, /, npts: _AnyInt) -> _Series[np.float64]: ... diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py index 66fe7d60c040..837847e45110 100644 --- a/numpy/polynomial/chebyshev.py +++ b/numpy/polynomial/chebyshev.py @@ -674,11 +674,6 @@ def chebmulx(c): -------- chebadd, chebsub, chebmul, chebdiv, chebpow - Notes - ----- - - .. versionadded:: 1.5.0 - Examples -------- >>> from numpy.polynomial import chebyshev as C @@ -800,7 +795,7 @@ def chebdiv(c1, c2): # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if c2[-1] == 0: - raise ZeroDivisionError() + raise ZeroDivisionError # FIXME: add message with details to exception # note: this is more efficient than `pu._div(chebmul, c1, c2)` lc1 = len(c1) @@ -904,8 +899,6 @@ def chebder(c, m=1, scl=1, axis=0): axis : int, optional Axis over which the derivative is taken. (Default: 0). - .. versionadded:: 1.7.0 - Returns ------- der : ndarray @@ -1006,8 +999,6 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0): axis : int, optional Axis over which the integral is taken. (Default: 0). - .. versionadded:: 1.7.0 - Returns ------- S : ndarray @@ -1138,8 +1129,6 @@ def chebval(x, c, tensor=True): over the columns of `c` for the evaluation. This keyword is useful when `c` is multidimensional. The default value is True. - .. versionadded:: 1.7.0 - Returns ------- values : ndarray, algebra_like @@ -1218,12 +1207,6 @@ def chebval2d(x, y, c): See Also -------- chebval, chebgrid2d, chebval3d, chebgrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._valnd(chebval, c, x, y) @@ -1271,12 +1254,6 @@ def chebgrid2d(x, y, c): See Also -------- chebval, chebval2d, chebval3d, chebgrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._gridnd(chebval, c, x, y) @@ -1322,12 +1299,6 @@ def chebval3d(x, y, z, c): See Also -------- chebval, chebval2d, chebgrid2d, chebgrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._valnd(chebval, c, x, y, z) @@ -1378,12 +1349,6 @@ def chebgrid3d(x, y, z, c): See Also -------- chebval, chebval2d, chebgrid2d, chebval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._gridnd(chebval, c, x, y, z) @@ -1484,12 +1449,6 @@ def chebvander2d(x, y, deg): See Also -------- chebvander, chebvander3d, chebval2d, chebval3d - - Notes - ----- - - .. 
versionadded:: 1.7.0 - """ return pu._vander_nd_flat((chebvander, chebvander), (x, y), deg) @@ -1538,12 +1497,6 @@ def chebvander3d(x, y, z, deg): See Also -------- chebvander, chebvander3d, chebval2d, chebval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._vander_nd_flat((chebvander, chebvander, chebvander), (x, y, z), deg) @@ -1592,8 +1545,6 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None): same variance. When using inverse-variance weighting, use ``w[i] = 1/sigma(y[i])``. The default value is None. - .. versionadded:: 1.5.0 - Returns ------- coef : ndarray, shape (M,) or (M, K) @@ -1694,12 +1645,6 @@ def chebcompanion(c): ------- mat : ndarray Scaled companion matrix of dimensions (deg, deg). - - Notes - ----- - - .. versionadded:: 1.7.0 - """ # c is a trimmed copy [c] = pu.as_series([c]) @@ -1789,8 +1734,6 @@ def chebinterpolate(func, deg, args=()): series tends to a minmax approximation to `func` with increasing `deg` if the function is continuous in the interval. - .. versionadded:: 1.14.0 - Parameters ---------- func : function @@ -1819,7 +1762,6 @@ def chebinterpolate(func, deg, args=()): Notes ----- - The Chebyshev polynomials used in the interpolation are orthogonal when sampled at the Chebyshev points of the first kind. If it is desired to constrain some of the coefficients they can simply be set to the desired @@ -1871,9 +1813,6 @@ def chebgauss(deg): Notes ----- - - .. versionadded:: 1.7.0 - The results have only been tested up to degree 100, higher degrees may be problematic. For Gauss-Chebyshev there are closed form solutions for the sample points and weights. If n = `deg`, then @@ -1910,12 +1849,6 @@ def chebweight(x): ------- w : ndarray The weight function at `x`. - - Notes - ----- - - .. versionadded:: 1.7.0 - """ w = 1./(np.sqrt(1. + x) * np.sqrt(1. - x)) return w @@ -1941,12 +1874,6 @@ def chebpts1(npts): See Also -------- chebpts2 - - Notes - ----- - - .. versionadded:: 1.5.0 - """ _npts = int(npts) if _npts != npts: @@ -1975,12 +1902,6 @@ def chebpts2(npts): ------- pts : ndarray The Chebyshev points of the second kind. - - Notes - ----- - - .. versionadded:: 1.5.0 - """ _npts = int(npts) if _npts != npts: @@ -2014,8 +1935,6 @@ class Chebyshev(ABCPolyBase): The default value is [-1., 1.]. window : (2,) array_like, optional Window, see `domain` for its use. The default value is [-1., 1.]. - - .. versionadded:: 1.6.0 symbol : str, optional Symbol used to represent the independent variable in string representations of the polynomial expression, e.g. for printing. @@ -2047,8 +1966,6 @@ def interpolate(cls, func, deg, domain=None, args=()): tends to a minmax approximation of `func` when the function is continuous in the domain. - .. versionadded:: 1.14.0 - Parameters ---------- func : function diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index 656ab567e524..24e51dca7fa5 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -623,8 +623,6 @@ def hermder(c, m=1, scl=1, axis=0): axis : int, optional Axis over which the derivative is taken. (Default: 0). - .. versionadded:: 1.7.0 - Returns ------- der : ndarray @@ -716,8 +714,6 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): axis : int, optional Axis over which the integral is taken. (Default: 0). - .. versionadded:: 1.7.0 - Returns ------- S : ndarray @@ -843,8 +839,6 @@ def hermval(x, c, tensor=True): over the columns of `c` for the evaluation. This keyword is useful when `c` is multidimensional. The default value is True. - .. 
versionadded:: 1.7.0 - Returns ------- values : ndarray, algebra_like @@ -936,11 +930,6 @@ def hermval2d(x, y, c): -------- hermval, hermgrid2d, hermval3d, hermgrid3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial.hermite import hermval2d @@ -998,11 +987,6 @@ def hermgrid2d(x, y, c): -------- hermval, hermval2d, hermval3d, hermgrid3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial.hermite import hermgrid2d @@ -1060,11 +1044,6 @@ def hermval3d(x, y, z, c): -------- hermval, hermval2d, hermgrid2d, hermgrid3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial.hermite import hermval3d @@ -1126,11 +1105,6 @@ def hermgrid3d(x, y, z, c): -------- hermval, hermval2d, hermgrid2d, hermval3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial.hermite import hermgrid3d @@ -1254,11 +1228,6 @@ def hermvander2d(x, y, deg): -------- hermvander, hermvander3d, hermval2d, hermval3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> import numpy as np @@ -1319,11 +1288,6 @@ def hermvander3d(x, y, z, deg): -------- hermvander, hermvander3d, hermval2d, hermval3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial.hermite import hermvander3d @@ -1493,11 +1457,6 @@ def hermcompanion(c): mat : ndarray Scaled companion matrix of dimensions (deg, deg). - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial.hermite import hermcompanion @@ -1611,8 +1570,6 @@ def _normed_hermite_n(x, n): Notes ----- - .. versionadded:: 1.10.0 - This function is needed for finding the Gauss points and integration weights for high degrees. The values of the standard Hermite functions overflow when n >= 207. @@ -1655,9 +1612,6 @@ def hermgauss(deg): Notes ----- - - .. versionadded:: 1.7.0 - The results have only been tested up to degree 100, higher degrees may be problematic. The weights are determined by using the fact that @@ -1723,11 +1677,6 @@ def hermweight(x): w : ndarray The weight function at `x`. - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> import numpy as np @@ -1763,8 +1712,6 @@ class Hermite(ABCPolyBase): The default value is [-1., 1.]. window : (2,) array_like, optional Window, see `domain` for its use. The default value is [-1., 1.]. - - .. versionadded:: 1.6.0 symbol : str, optional Symbol used to represent the independent variable in string representations of the polynomial expression, e.g. for printing. diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index 48b76894336e..c820760ef75c 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -621,8 +621,6 @@ def hermeder(c, m=1, scl=1, axis=0): axis : int, optional Axis over which the derivative is taken. (Default: 0). - .. versionadded:: 1.7.0 - Returns ------- der : ndarray @@ -714,8 +712,6 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): axis : int, optional Axis over which the integral is taken. (Default: 0). - .. versionadded:: 1.7.0 - Returns ------- S : ndarray @@ -841,8 +837,6 @@ def hermeval(x, c, tensor=True): over the columns of `c` for the evaluation. This keyword is useful when `c` is multidimensional. The default value is True. - .. versionadded:: 1.7.0 - Returns ------- values : ndarray, algebra_like @@ -932,12 +926,6 @@ def hermeval2d(x, y, c): See Also -------- hermeval, hermegrid2d, hermeval3d, hermegrid3d - - Notes - ----- - - .. 
versionadded:: 1.7.0 - """ return pu._valnd(hermeval, c, x, y) @@ -985,12 +973,6 @@ def hermegrid2d(x, y, c): See Also -------- hermeval, hermeval2d, hermeval3d, hermegrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._gridnd(hermeval, c, x, y) @@ -1036,12 +1018,6 @@ def hermeval3d(x, y, z, c): See Also -------- hermeval, hermeval2d, hermegrid2d, hermegrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._valnd(hermeval, c, x, y, z) @@ -1092,12 +1068,6 @@ def hermegrid3d(x, y, z, c): See Also -------- hermeval, hermeval2d, hermegrid2d, hermeval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._gridnd(hermeval, c, x, y, z) @@ -1206,12 +1176,6 @@ def hermevander2d(x, y, deg): See Also -------- hermevander, hermevander3d, hermeval2d, hermeval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._vander_nd_flat((hermevander, hermevander), (x, y), deg) @@ -1260,12 +1224,6 @@ def hermevander3d(x, y, z, deg): See Also -------- hermevander, hermevander3d, hermeval2d, hermeval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._vander_nd_flat((hermevander, hermevander, hermevander), (x, y, z), deg) @@ -1424,12 +1382,6 @@ def hermecompanion(c): ------- mat : ndarray Scaled companion matrix of dimensions (deg, deg). - - Notes - ----- - - .. versionadded:: 1.7.0 - """ # c is a trimmed copy [c] = pu.as_series([c]) @@ -1536,8 +1488,6 @@ def _normed_hermite_e_n(x, n): Notes ----- - .. versionadded:: 1.10.0 - This function is needed for finding the Gauss points and integration weights for high degrees. The values of the standard HermiteE functions overflow when n >= 207. @@ -1580,9 +1530,6 @@ def hermegauss(deg): Notes ----- - - .. versionadded:: 1.7.0 - The results have only been tested up to degree 100, higher degrees may be problematic. The weights are determined by using the fact that @@ -1640,12 +1587,6 @@ def hermeweight(x): ------- w : ndarray The weight function at `x`. - - Notes - ----- - - .. versionadded:: 1.7.0 - """ w = np.exp(-.5*x**2) return w @@ -1673,8 +1614,6 @@ class HermiteE(ABCPolyBase): The default value is [-1., 1.]. window : (2,) array_like, optional Window, see `domain` for its use. The default value is [-1., 1.]. - - .. versionadded:: 1.6.0 symbol : str, optional Symbol used to represent the independent variable in string representations of the polynomial expression, e.g. for printing. diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index 87f3ffa6ffd7..b2cc5817c30c 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -617,8 +617,6 @@ def lagder(c, m=1, scl=1, axis=0): axis : int, optional Axis over which the derivative is taken. (Default: 0). - .. versionadded:: 1.7.0 - Returns ------- der : ndarray @@ -714,8 +712,6 @@ def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0): axis : int, optional Axis over which the integral is taken. (Default: 0). - .. versionadded:: 1.7.0 - Returns ------- S : ndarray @@ -842,8 +838,6 @@ def lagval(x, c, tensor=True): over the columns of `c` for the evaluation. This keyword is useful when `c` is multidimensional. The default value is True. - .. versionadded:: 1.7.0 - Returns ------- values : ndarray, algebra_like @@ -934,11 +928,6 @@ def lagval2d(x, y, c): -------- lagval, laggrid2d, lagval3d, laggrid3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial.laguerre import lagval2d @@ -993,11 +982,6 @@ def laggrid2d(x, y, c): -------- lagval, lagval2d, lagval3d, laggrid3d - Notes - ----- - - .. 
versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial.laguerre import laggrid2d @@ -1052,11 +1036,6 @@ def lagval3d(x, y, z, c): -------- lagval, lagval2d, laggrid2d, laggrid3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial.laguerre import lagval3d @@ -1115,11 +1094,6 @@ def laggrid3d(x, y, z, c): -------- lagval, lagval2d, laggrid2d, lagval3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial.laguerre import laggrid3d @@ -1239,11 +1213,6 @@ def lagvander2d(x, y, deg): -------- lagvander, lagvander3d, lagval2d, lagval3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> import numpy as np @@ -1302,11 +1271,6 @@ def lagvander3d(x, y, z, deg): -------- lagvander, lagvander3d, lagval2d, lagval3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> import numpy as np @@ -1475,11 +1439,6 @@ def lagcompanion(c): mat : ndarray Companion matrix of dimensions (deg, deg). - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial.laguerre import lagcompanion @@ -1594,9 +1553,6 @@ def laggauss(deg): Notes ----- - - .. versionadded:: 1.7.0 - The results have only been tested up to degree 100 higher degrees may be problematic. The weights are determined by using the fact that @@ -1658,11 +1614,6 @@ def lagweight(x): w : ndarray The weight function at `x`. - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial.laguerre import lagweight @@ -1696,8 +1647,6 @@ class Laguerre(ABCPolyBase): The default value is [0., 1.]. window : (2,) array_like, optional Window, see `domain` for its use. The default value is [0., 1.]. - - .. versionadded:: 1.6.0 symbol : str, optional Symbol used to represent the independent variable in string representations of the polynomial expression, e.g. for printing. diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index 674b7f1bb82b..c2cd3fbfe760 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -639,8 +639,6 @@ def legder(c, m=1, scl=1, axis=0): axis : int, optional Axis over which the derivative is taken. (Default: 0). - .. versionadded:: 1.7.0 - Returns ------- der : ndarray @@ -741,8 +739,6 @@ def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0): axis : int, optional Axis over which the integral is taken. (Default: 0). - .. versionadded:: 1.7.0 - Returns ------- S : ndarray @@ -874,8 +870,6 @@ def legval(x, c, tensor=True): over the columns of `c` for the evaluation. This keyword is useful when `c` is multidimensional. The default value is True. - .. versionadded:: 1.7.0 - Returns ------- values : ndarray, algebra_like @@ -955,12 +949,6 @@ def legval2d(x, y, c): See Also -------- legval, leggrid2d, legval3d, leggrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._valnd(legval, c, x, y) @@ -1008,12 +996,6 @@ def leggrid2d(x, y, c): See Also -------- legval, legval2d, legval3d, leggrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._gridnd(legval, c, x, y) @@ -1059,12 +1041,6 @@ def legval3d(x, y, z, c): See Also -------- legval, legval2d, leggrid2d, leggrid3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._valnd(legval, c, x, y, z) @@ -1115,12 +1091,6 @@ def leggrid3d(x, y, z, c): See Also -------- legval, legval2d, leggrid2d, legval3d - - Notes - ----- - - .. 
versionadded:: 1.7.0 - """ return pu._gridnd(legval, c, x, y, z) @@ -1221,12 +1191,6 @@ def legvander2d(x, y, deg): See Also -------- legvander, legvander3d, legval2d, legval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._vander_nd_flat((legvander, legvander), (x, y), deg) @@ -1275,12 +1239,6 @@ def legvander3d(x, y, z, deg): See Also -------- legvander, legvander3d, legval2d, legval3d - - Notes - ----- - - .. versionadded:: 1.7.0 - """ return pu._vander_nd_flat((legvander, legvander, legvander), (x, y, z), deg) @@ -1329,8 +1287,6 @@ def legfit(x, y, deg, rcond=None, full=False, w=None): same variance. When using inverse-variance weighting, use ``w[i] = 1/sigma(y[i])``. The default value is None. - .. versionadded:: 1.5.0 - Returns ------- coef : ndarray, shape (M,) or (M, K) @@ -1433,12 +1389,6 @@ def legcompanion(c): ------- mat : ndarray Scaled companion matrix of dimensions (deg, deg). - - Notes - ----- - - .. versionadded:: 1.7.0 - """ # c is a trimmed copy [c] = pu.as_series([c]) @@ -1542,9 +1492,6 @@ def leggauss(deg): Notes ----- - - .. versionadded:: 1.7.0 - The results have only been tested up to degree 100, higher degrees may be problematic. The weights are determined by using the fact that @@ -1604,12 +1551,6 @@ def legweight(x): ------- w : ndarray The weight function at `x`. - - Notes - ----- - - .. versionadded:: 1.7.0 - """ w = x*0.0 + 1.0 return w @@ -1636,8 +1577,6 @@ class Legendre(ABCPolyBase): The default value is [-1., 1.]. window : (2,) array_like, optional Window, see `domain` for its use. The default value is [-1., 1.]. - - .. versionadded:: 1.6.0 symbol : str, optional Symbol used to represent the independent variable in string representations of the polynomial expression, e.g. for printing. diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 12ab1ba34f47..86ea3a5d1d6e 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -308,11 +308,6 @@ def polymulx(c): -------- polyadd, polysub, polymul, polydiv, polypow - Notes - ----- - - .. versionadded:: 1.5.0 - Examples -------- >>> from numpy.polynomial import polynomial as P @@ -407,7 +402,7 @@ def polydiv(c1, c2): # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if c2[-1] == 0: - raise ZeroDivisionError() + raise ZeroDivisionError # FIXME: add message with details to exception # note: this is more efficient than `pu._div(polymul, c1, c2)` lc1 = len(c1) @@ -495,8 +490,6 @@ def polyder(c, m=1, scl=1, axis=0): axis : int, optional Axis over which the derivative is taken. (Default: 0). - .. versionadded:: 1.7.0 - Returns ------- der : ndarray @@ -586,8 +579,6 @@ def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): axis : int, optional Axis over which the integral is taken. (Default: 0). - .. versionadded:: 1.7.0 - Returns ------- S : ndarray @@ -712,8 +703,6 @@ def polyval(x, c, tensor=True): over the columns of `c` for the evaluation. This keyword is useful when `c` is multidimensional. The default value is True. - .. versionadded:: 1.7.0 - Returns ------- values : ndarray, compatible object @@ -787,8 +776,6 @@ def polyvalfromroots(x, r, tensor=True): evaluated only for the corresponding broadcast value of `x`. Note that scalars have shape (,). - .. versionadded:: 1.12 - Parameters ---------- x : array_like, compatible object @@ -896,16 +883,11 @@ def polyval2d(x, y, c): -------- polyval, polygrid2d, polyval3d, polygrid3d - Notes - ----- - - .. 
versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial import polynomial as P >>> c = ((1, 2, 3), (4, 5, 6)) - >>> P.polyval2d(1, 1, c) + >>> P.polyval2d(1, 1, c) 21.0 """ @@ -956,11 +938,6 @@ def polygrid2d(x, y, c): -------- polyval, polyval2d, polyval3d, polygrid3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial import polynomial as P @@ -1015,11 +992,6 @@ def polyval3d(x, y, z, c): -------- polyval, polyval2d, polygrid2d, polygrid3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial import polynomial as P @@ -1078,11 +1050,6 @@ def polygrid3d(x, y, z, c): -------- polyval, polyval2d, polygrid2d, polyval3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial import polynomial as P @@ -1135,7 +1102,7 @@ def polyvander(x, deg): Examples -------- The Vandermonde matrix of degree ``deg = 5`` and sample points - ``x = [-1, 2, 3]`` contains the element-wise powers of `x` + ``x = [-1, 2, 3]`` contains the element-wise powers of `x` from 0 to 5 as its columns. >>> from numpy.polynomial import polynomial as P @@ -1236,7 +1203,7 @@ def polyvander2d(x, y, deg): >>> P.polyvander2d(x=x, y=0*x, deg=(m, 0)) == P.polyvander(x=x, deg=m) array([[ True, True], [ True, True]]) - + """ return pu._vander_nd_flat((polyvander, polyvander), (x, y), deg) @@ -1286,11 +1253,6 @@ def polyvander3d(x, y, z, deg): -------- polyvander, polyvander3d, polyval2d, polyval3d - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> import numpy as np @@ -1308,7 +1270,7 @@ def polyvander3d(x, y, z, deg): -8., 8., 16., 4., 8., -8., -16., 16., 32.], [ 1., 5., -3., -15., 9., 45., 1., 5., -3., -15., 9., 45., 1., 5., -3., -15., 9., 45.]]) - + We can verify the columns for any ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= k <= n`` @@ -1367,8 +1329,6 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): same variance. When using inverse-variance weighting, use ``w[i] = 1/sigma(y[i])``. The default value is None. - .. versionadded:: 1.5.0 - Returns ------- coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`) @@ -1468,7 +1428,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): array([-6.73496154e-17, -1.00000000e+00, 0.00000000e+00, 1.00000000e+00]) >>> stats # note the minuscule SSR [array([8.79579319e-31]), - 4, + np.int32(4), array([1.38446749, 1.32119158, 0.50443316, 0.28853036]), 1.1324274851176597e-14] @@ -1495,11 +1455,6 @@ def polycompanion(c): mat : ndarray Companion matrix of dimensions (deg, deg). - Notes - ----- - - .. versionadded:: 1.7.0 - Examples -------- >>> from numpy.polynomial import polynomial as P @@ -1609,8 +1564,6 @@ class Polynomial(ABCPolyBase): The default value is [-1., 1.]. window : (2,) array_like, optional Window, see `domain` for its use. The default value is [-1., 1.]. - - .. versionadded:: 1.6.0 symbol : str, optional Symbol used to represent the independent variable in string representations of the polynomial expression, e.g. for printing. 
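Related to the `fit` change in `_polybase.py` above (widening a degenerate domain) and the new `test_fit_degenerate_domain` below: fitting data whose sample points are all identical previously produced a zero-width domain and an invalid linear map. A short sketch of the new behaviour, assuming current `Polynomial.fit` semantics:

    import numpy as np
    from numpy.polynomial import Polynomial

    # all x equal: pu.getdomain gives [1., 1.], which fit() widens to [0., 2.]
    p = Polynomial.fit([1.0], [2.0], deg=0)
    print(p.domain)  # [0. 2.]
    print(p(5.0))    # 2.0 -- a degree-0 fit is constant everywhere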
diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py index b3987d0c623b..1a6813b786c9 100644 --- a/numpy/polynomial/polyutils.py +++ b/numpy/polynomial/polyutils.py @@ -533,7 +533,7 @@ def _div(mul_f, c1, c2): # c1, c2 are trimmed copies [c1, c2] = as_series([c1, c2]) if c2[-1] == 0: - raise ZeroDivisionError() + raise ZeroDivisionError # FIXME: add message with details to exception lc1 = len(c1) lc2 = len(c2) @@ -702,7 +702,7 @@ def _pow(mul_f, c, pow, maxpower): def _as_int(x, desc): """ - Like `operator.index`, but emits a custom exception when passed an + Like `operator.index`, but emits a custom exception when passed an incorrect type Parameters @@ -745,7 +745,7 @@ def format_float(x, parens=False): if exp_format: s = dragon4_scientific(x, precision=opts['precision'], - unique=unique, trim=trim, + unique=unique, trim=trim, sign=opts['sign'] == '+') if parens: s = '(' + s + ')' diff --git a/numpy/polynomial/tests/test_hermite.py b/numpy/polynomial/tests/test_hermite.py index 53ee0844e3c5..2188800853f2 100644 --- a/numpy/polynomial/tests/test_hermite.py +++ b/numpy/polynomial/tests/test_hermite.py @@ -105,7 +105,7 @@ def test_hermpow(self): msg = f"At i={i}, j={j}" c = np.arange(i + 1) tgt = reduce(herm.hermmul, [c]*j, np.array([1])) - res = herm.hermpow(c, j) + res = herm.hermpow(c, j) assert_equal(trim(res), trim(tgt), err_msg=msg) diff --git a/numpy/polynomial/tests/test_laguerre.py b/numpy/polynomial/tests/test_laguerre.py index 227ef3c5576d..49f7c7e115be 100644 --- a/numpy/polynomial/tests/test_laguerre.py +++ b/numpy/polynomial/tests/test_laguerre.py @@ -102,7 +102,7 @@ def test_lagpow(self): msg = f"At i={i}, j={j}" c = np.arange(i + 1) tgt = reduce(lag.lagmul, [c]*j, np.array([1])) - res = lag.lagpow(c, j) + res = lag.lagpow(c, j) assert_equal(trim(res), trim(tgt), err_msg=msg) diff --git a/numpy/polynomial/tests/test_legendre.py b/numpy/polynomial/tests/test_legendre.py index 92399c160ecb..9f1c9733a911 100644 --- a/numpy/polynomial/tests/test_legendre.py +++ b/numpy/polynomial/tests/test_legendre.py @@ -106,7 +106,7 @@ def test_legpow(self): msg = f"At i={i}, j={j}" c = np.arange(i + 1) tgt = reduce(leg.legmul, [c]*j, np.array([1])) - res = leg.legpow(c, j) + res = leg.legpow(c, j) assert_equal(trim(res), trim(tgt), err_msg=msg) diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index a0be94c3a6a0..d36b07dbd953 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -5,11 +5,12 @@ from fractions import Fraction import numpy as np import numpy.polynomial.polynomial as poly +import numpy.polynomial.polyutils as pu import pickle from copy import deepcopy from numpy.testing import ( assert_almost_equal, assert_raises, assert_equal, assert_, - assert_array_equal, assert_raises_regex) + assert_array_equal, assert_raises_regex, assert_warns) def trim(x): @@ -119,7 +120,7 @@ def test_polypow(self): msg = f"At i={i}, j={j}" c = np.arange(i + 1) tgt = reduce(poly.polymul, [c]*j, np.array([1])) - res = poly.polypow(c, j) + res = poly.polypow(c, j) assert_equal(trim(res), trim(tgt), err_msg=msg) class TestFraction: @@ -130,7 +131,7 @@ def test_Fraction(self): one = Fraction(1, 1) zero = Fraction(0, 1) p = poly.Polynomial([f, f], domain=[zero, one], window=[zero, one]) - + x = 2 * p + p ** 2 assert_equal(x.coef, np.array([Fraction(16, 9), Fraction(20, 9), Fraction(4, 9)], dtype=object)) @@ -628,6 +629,14 @@ def test_polyline(self): def test_polyline_zero(self): 
assert_equal(poly.polyline(3, 0), [3]) + def test_fit_degenerate_domain(self): + p = poly.Polynomial.fit([1], [2], deg=0) + assert_equal(p.coef, [2.]) + p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=0) + assert_almost_equal(p.coef, [2.05]) + with assert_warns(pu.RankWarning): + p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=1) + def test_result_type(self): w = np.array([-1, 1], dtype=np.float32) p = np.polynomial.Polynomial(w, domain=w, window=w) diff --git a/numpy/polynomial/tests/test_printing.py b/numpy/polynomial/tests/test_printing.py index 95dec549350c..6651f6cd9205 100644 --- a/numpy/polynomial/tests/test_printing.py +++ b/numpy/polynomial/tests/test_printing.py @@ -496,7 +496,7 @@ def test_numeric_object_coefficients(self): class TestPrintOptions: """ Test the output is properly configured via printoptions. - The exponential notation is enabled automatically when the values + The exponential notation is enabled automatically when the values are too small or too large. """ @@ -519,7 +519,7 @@ def test_latex(self): r'$x \mapsto \text{0.5} + \text{0.14285714}\,x + ' r'\text{14285714.28571429}\,x^{2} + ' r'\text{(1.42857143e+08)}\,x^{3}$') - + with printoptions(precision=3): assert_equal(p._repr_latex_(), r'$x \mapsto \text{0.5} + \text{0.143}\,x + ' @@ -528,20 +528,20 @@ def test_latex(self): def test_fixed(self): p = poly.Polynomial([1/2]) assert_equal(str(p), '0.5') - + with printoptions(floatmode='fixed'): assert_equal(str(p), '0.50000000') - + with printoptions(floatmode='fixed', precision=4): assert_equal(str(p), '0.5000') def test_switch_to_exp(self): for i, s in enumerate(SWITCH_TO_EXP): with printoptions(precision=i): - p = poly.Polynomial([1.23456789*10**-i + p = poly.Polynomial([1.23456789*10**-i for i in range(i//2+3)]) - assert str(p).replace('\n', ' ') == s - + assert str(p).replace('\n', ' ') == s + def test_non_finite(self): p = poly.Polynomial([nan, inf]) assert str(p) == 'nan + inf x' diff --git a/numpy/random/__init__.pyi b/numpy/random/__init__.pyi index 26cba3c90502..8cfa9c0e1369 100644 --- a/numpy/random/__init__.pyi +++ b/numpy/random/__init__.pyi @@ -1,71 +1,126 @@ -from numpy._pytesttester import PytestTester - -from numpy.random._generator import Generator as Generator -from numpy.random._generator import default_rng as default_rng -from numpy.random._mt19937 import MT19937 as MT19937 -from numpy.random._pcg64 import ( - PCG64 as PCG64, - PCG64DXSM as PCG64DXSM, -) -from numpy.random._philox import Philox as Philox -from numpy.random._sfc64 import SFC64 as SFC64 -from numpy.random.bit_generator import BitGenerator as BitGenerator -from numpy.random.bit_generator import SeedSequence as SeedSequence -from numpy.random.mtrand import ( - RandomState as RandomState, - beta as beta, - binomial as binomial, - bytes as bytes, - chisquare as chisquare, - choice as choice, - dirichlet as dirichlet, - exponential as exponential, - f as f, - gamma as gamma, - geometric as geometric, - get_bit_generator as get_bit_generator, - get_state as get_state, - gumbel as gumbel, - hypergeometric as hypergeometric, - laplace as laplace, - logistic as logistic, - lognormal as lognormal, - logseries as logseries, - multinomial as multinomial, - multivariate_normal as multivariate_normal, - negative_binomial as negative_binomial, - noncentral_chisquare as noncentral_chisquare, - noncentral_f as noncentral_f, - normal as normal, - pareto as pareto, - permutation as permutation, - poisson as poisson, - power as power, - rand as rand, - randint as randint, - randn as randn, - random as 
random, - random_integers as random_integers, - random_sample as random_sample, - ranf as ranf, - rayleigh as rayleigh, - sample as sample, - seed as seed, - set_bit_generator as set_bit_generator, - set_state as set_state, - shuffle as shuffle, - standard_cauchy as standard_cauchy, - standard_exponential as standard_exponential, - standard_gamma as standard_gamma, - standard_normal as standard_normal, - standard_t as standard_t, - triangular as triangular, - uniform as uniform, - vonmises as vonmises, - wald as wald, - weibull as weibull, - zipf as zipf, +from ._generator import Generator +from ._generator import default_rng +from ._mt19937 import MT19937 +from ._pcg64 import PCG64, PCG64DXSM +from ._philox import Philox +from ._sfc64 import SFC64 +from .bit_generator import BitGenerator +from .bit_generator import SeedSequence +from .mtrand import ( + RandomState, + beta, + binomial, + bytes, + chisquare, + choice, + dirichlet, + exponential, + f, + gamma, + geometric, + get_bit_generator, # noqa: F401 + get_state, + gumbel, + hypergeometric, + laplace, + logistic, + lognormal, + logseries, + multinomial, + multivariate_normal, + negative_binomial, + noncentral_chisquare, + noncentral_f, + normal, + pareto, + permutation, + poisson, + power, + rand, + randint, + randn, + random, + random_integers, + random_sample, + ranf, + rayleigh, + sample, + seed, + set_bit_generator, # noqa: F401 + set_state, + shuffle, + standard_cauchy, + standard_exponential, + standard_gamma, + standard_normal, + standard_t, + triangular, + uniform, + vonmises, + wald, + weibull, + zipf, ) -__all__: list[str] -test: PytestTester +__all__ = [ + "beta", + "binomial", + "bytes", + "chisquare", + "choice", + "dirichlet", + "exponential", + "f", + "gamma", + "geometric", + "get_state", + "gumbel", + "hypergeometric", + "laplace", + "logistic", + "lognormal", + "logseries", + "multinomial", + "multivariate_normal", + "negative_binomial", + "noncentral_chisquare", + "noncentral_f", + "normal", + "pareto", + "permutation", + "poisson", + "power", + "rand", + "randint", + "randn", + "random", + "random_integers", + "random_sample", + "ranf", + "rayleigh", + "sample", + "seed", + "set_state", + "shuffle", + "standard_cauchy", + "standard_exponential", + "standard_gamma", + "standard_normal", + "standard_t", + "triangular", + "uniform", + "vonmises", + "wald", + "weibull", + "zipf", + "Generator", + "RandomState", + "SeedSequence", + "MT19937", + "Philox", + "PCG64", + "PCG64DXSM", + "SFC64", + "default_rng", + "BitGenerator", +] diff --git a/numpy/random/_examples/cffi/parse.py b/numpy/random/_examples/cffi/parse.py index d41c4c2db23d..993cedee05eb 100644 --- a/numpy/random/_examples/cffi/parse.py +++ b/numpy/random/_examples/cffi/parse.py @@ -30,11 +30,11 @@ def parse_distributions_h(ffi, inc_dir): continue if line.strip().startswith('#ifdef __cplusplus'): ignoring = True - + # massage the include file if line.strip().startswith('#'): continue - + # skip any inlined function definition # which starts with 'static inline xxx(...) 
{' # and ends with a closing '}' @@ -45,7 +45,7 @@ def parse_distributions_h(ffi, inc_dir): in_skip += line.count('{') in_skip -= line.count('}') continue - + # replace defines with their value or remove them line = line.replace('DECLDIR', '') line = line.replace('RAND_INT_TYPE', 'int64_t') diff --git a/numpy/random/_examples/cython/extending.pyx b/numpy/random/_examples/cython/extending.pyx index 30efd7447748..6a0f45e1be9e 100644 --- a/numpy/random/_examples/cython/extending.pyx +++ b/numpy/random/_examples/cython/extending.pyx @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 #cython: language_level=3 from libc.stdint cimport uint32_t diff --git a/numpy/random/_examples/cython/extending_distributions.pyx b/numpy/random/_examples/cython/extending_distributions.pyx index d908e92d01b0..59ecc4b36366 100644 --- a/numpy/random/_examples/cython/extending_distributions.pyx +++ b/numpy/random/_examples/cython/extending_distributions.pyx @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 #cython: language_level=3 """ This file shows how the to use a BitGenerator to create a distribution. diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index 16a0e5e0ff8d..84b97883223d 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -1,5 +1,5 @@ from collections.abc import Callable -from typing import Any, overload, TypeVar, Literal +from typing import Any, TypeAlias, overload, TypeVar, Literal import numpy as np from numpy import ( @@ -17,7 +17,7 @@ from numpy import ( uint32, uint64, ) -from numpy.random import BitGenerator, SeedSequence +from numpy.random import BitGenerator, SeedSequence, RandomState from numpy._typing import ( ArrayLike, NDArray, @@ -25,8 +25,6 @@ from numpy._typing import ( _ArrayLikeInt_co, _DoubleCodes, _DTypeLikeBool, - _DTypeLikeInt, - _DTypeLikeUInt, _Float32Codes, _Float64Codes, _FloatLike_co, @@ -47,7 +45,7 @@ from numpy._typing import ( _ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) -_DTypeLikeFloat32 = ( +_DTypeLikeFloat32: TypeAlias = ( dtype[float32] | _SupportsDType[dtype[float32]] | type[float32] @@ -55,7 +53,7 @@ _DTypeLikeFloat32 = ( | _SingleCodes ) -_DTypeLikeFloat64 = ( +_DTypeLikeFloat64: TypeAlias = ( dtype[float64] | _SupportsDType[dtype[float64]] | type[float] @@ -214,6 +212,8 @@ class Generator: low: int, high: None | int = ..., size: None = ..., + *, + endpoint: bool = ..., ) -> int: ... @overload def integers( # type: ignore[misc] @@ -338,6 +338,8 @@ class Generator: low: _ArrayLikeInt_co, high: None | _ArrayLikeInt_co = ..., size: None | _ShapeLike = ..., + *, + endpoint: bool = ... ) -> NDArray[int64]: ... @overload def integers( # type: ignore[misc] @@ -780,5 +782,5 @@ class Generator: def shuffle(self, x: ArrayLike, axis: int = ...) -> None: ... def default_rng( - seed: None | _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator = ... + seed: None | _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator | RandomState = ... ) -> Generator: ... 
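
The two behavioural changes captured in this stub file — the keyword-only `endpoint` flag on the `integers` overloads and `default_rng` accepting a legacy `RandomState` — can be exercised as below. This is a minimal sketch that assumes a NumPy build with this patch applied; the `RandomState` coercion (including the `rs._bit_generator` identity check) mirrors the test added to `test_direct.py` later in this diff:

import numpy as np

rng = np.random.default_rng(0)

# endpoint=True makes the upper bound inclusive: a fair six-sided die.
rolls = rng.integers(1, 6, size=100, endpoint=True)
assert rolls.min() >= 1 and rolls.max() <= 6

# With this patch, default_rng coerces a RandomState to a Generator
# that reuses the same underlying bit generator.
rs = np.random.RandomState(1234)
rg = np.random.default_rng(rs)
assert rg.bit_generator is rs._bit_generator
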
diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index 221ac817b783..ac2f64a0f81c 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -22,6 +22,7 @@ from ._bounded_integers cimport (_rand_bool, _rand_int32, _rand_int64, _rand_int16, _rand_int8, _rand_uint64, _rand_uint32, _rand_uint16, _rand_uint8, _gen_mask) from ._pcg64 import PCG64 +from ._mt19937 import MT19937 from numpy.random cimport bitgen_t from ._common cimport (POISSON_LAM_MAX, CONS_POSITIVE, CONS_NONE, CONS_NON_NEGATIVE, CONS_BOUNDED_0_1, CONS_BOUNDED_GT_0_1, @@ -799,6 +800,9 @@ cdef class Generator: ``p`` must sum to 1 when cast to ``float64``. To ensure this, you may wish to normalize using ``p = p / np.sum(p, dtype=float)``. + When passing ``a`` as an integer type and ``size`` is not specified, the return + type is a native Python ``int``. + Examples -------- Generate a uniform random sample from np.arange(5) of size 3: @@ -1258,7 +1262,7 @@ cdef class Generator: >>> rng = np.random.default_rng() >>> s = rng.normal(mu, sigma, 1000) - Verify the mean and the variance: + Verify the mean and the standard deviation: >>> abs(mu - np.mean(s)) 0.0 # may vary @@ -1579,9 +1583,6 @@ cdef class Generator: ---------- dfnum : float or array_like of floats Numerator degrees of freedom, must be > 0. - - .. versionchanged:: 1.14.0 - Earlier NumPy versions required dfnum > 1. dfden : float or array_like of floats Denominator degrees of freedom, must be > 0. nonc : float or array_like of floats @@ -1679,7 +1680,7 @@ cdef class Generator: The variable obtained by summing the squares of `df` independent, standard normally distributed random variables: - .. math:: Q = \\sum_{i=0}^{\\mathtt{df}} X^2_i + .. math:: Q = \\sum_{i=1}^{\\mathtt{df}} X^2_i is chi-square distributed, denoted @@ -1736,9 +1737,6 @@ cdef class Generator: ---------- df : float or array_like of floats Degrees of freedom, must be > 0. - - .. versionchanged:: 1.10.0 - Earlier NumPy versions required dfnum > 1. nonc : float or array_like of floats Non-centrality, must be non-negative. size : int or tuple of ints, optional @@ -1983,7 +1981,7 @@ cdef class Generator: Draw samples from a von Mises distribution. Samples are drawn from a von Mises distribution with specified mode - (mu) and dispersion (kappa), on the interval [-pi, pi]. + (mu) and concentration (kappa), on the interval [-pi, pi]. The von Mises distribution (also known as the circular normal distribution) is a continuous probability distribution on the unit @@ -1995,7 +1993,7 @@ cdef class Generator: mu : float or array_like of floats Mode ("center") of the distribution. kappa : float or array_like of floats - Dispersion of the distribution, has to be >=0. + Concentration of the distribution, has to be >=0. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples are drawn. If size is ``None`` (default), @@ -2018,7 +2016,7 @@ cdef class Generator: .. math:: p(x) = \\frac{e^{\\kappa cos(x-\\mu)}}{2\\pi I_0(\\kappa)}, - where :math:`\\mu` is the mode and :math:`\\kappa` the dispersion, + where :math:`\\mu` is the mode and :math:`\\kappa` the concentration, and :math:`I_0(\\kappa)` is the modified Bessel function of order 0. 
The von Mises is named for Richard Edler von Mises, who was born in @@ -2039,7 +2037,7 @@ cdef class Generator: -------- Draw samples from the distribution: - >>> mu, kappa = 0.0, 4.0 # mean and dispersion + >>> mu, kappa = 0.0, 4.0 # mean and concentration >>> rng = np.random.default_rng() >>> s = rng.vonmises(mu, kappa, 1000) @@ -3010,7 +3008,7 @@ cdef class Generator: Notes ----- - The probability density for the binomial distribution is + The probability mass function (PMF) for the binomial distribution is .. math:: P(N) = \\binom{n}{N}p^N(1-p)^{n-N}, @@ -3282,7 +3280,7 @@ cdef class Generator: Notes ----- - The Poisson distribution + The probability mass function (PMF) of Poisson distribution is .. math:: f(k; \\lambda)=\\frac{\\lambda^k e^{-\\lambda}}{k!} @@ -3372,7 +3370,7 @@ cdef class Generator: Notes ----- - The probability density for the Zipf distribution is + The probability mass function (PMF) for the Zipf distribution is .. math:: p(k) = \\frac{k^{-a}}{\\zeta(a)}, @@ -3537,7 +3535,7 @@ cdef class Generator: Notes ----- - The probability density for the Hypergeometric distribution is + The probability mass function (PMF) for the Hypergeometric distribution is .. math:: P(x) = \\frac{\\binom{g}{x}\\binom{b}{n-x}}{\\binom{g+b}{n}}, @@ -3759,8 +3757,6 @@ cdef class Generator: the slowest method. The method `eigh` uses eigen decomposition to compute A and is faster than svd but slower than cholesky. - .. versionadded:: 1.18.0 - Returns ------- out : ndarray @@ -4009,9 +4005,6 @@ cdef class Generator: Each entry ``out[i,j,...,:]`` is a ``p``-dimensional value drawn from the distribution. - .. versionchanged:: 1.22.0 - Added support for broadcasting `pvals` against `n` - Examples -------- Throw a dice 20 times: @@ -4305,8 +4298,6 @@ cdef class Generator: performance of the algorithm is important, test the two methods with typical inputs to decide which works best. - .. versionadded:: 1.18.0 - Examples -------- >>> colors = [16, 8, 4] @@ -5003,7 +4994,7 @@ def default_rng(seed=None): Parameters ---------- - seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional + seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator, RandomState}, optional A seed to initialize the `BitGenerator`. If None, then fresh, unpredictable entropy will be pulled from the OS. If an ``int`` or ``array_like[ints]`` is passed, then all values must be non-negative and will be @@ -5011,6 +5002,7 @@ def default_rng(seed=None): pass in a `SeedSequence` instance. Additionally, when passed a `BitGenerator`, it will be wrapped by `Generator`. If passed a `Generator`, it will be returned unaltered. + When passed a legacy `RandomState` instance it will be coerced to a `Generator`. Returns ------- @@ -5083,6 +5075,13 @@ def default_rng(seed=None): elif isinstance(seed, Generator): # Pass through a Generator. return seed + elif isinstance(seed, np.random.RandomState): + gen = np.random.Generator(seed._bit_generator) + return gen + # Otherwise we need to instantiate a new BitGenerator and Generator as # normal. 
return Generator(PCG64(seed)) + + +default_rng.__module__ = "numpy.random" diff --git a/numpy/random/_mt19937.pyi b/numpy/random/_mt19937.pyi index 600411d5f641..430dd8041f50 100644 --- a/numpy/random/_mt19937.pyi +++ b/numpy/random/_mt19937.pyi @@ -1,14 +1,16 @@ -from typing import TypedDict +from typing import TypedDict, type_check_only from numpy import uint32 from numpy.typing import NDArray from numpy.random.bit_generator import BitGenerator, SeedSequence from numpy._typing import _ArrayLikeInt_co +@type_check_only class _MT19937Internal(TypedDict): key: NDArray[uint32] pos: int +@type_check_only class _MT19937State(TypedDict): bit_generator: str state: _MT19937Internal diff --git a/numpy/random/_pcg64.pyi b/numpy/random/_pcg64.pyi index 470aee867493..15bb0525c9a5 100644 --- a/numpy/random/_pcg64.pyi +++ b/numpy/random/_pcg64.pyi @@ -1,12 +1,14 @@ -from typing import TypedDict +from typing import TypedDict, type_check_only from numpy.random.bit_generator import BitGenerator, SeedSequence from numpy._typing import _ArrayLikeInt_co +@type_check_only class _PCG64Internal(TypedDict): state: int inc: int +@type_check_only class _PCG64State(TypedDict): bit_generator: str state: _PCG64Internal diff --git a/numpy/random/_philox.pyi b/numpy/random/_philox.pyi index 485f3bc82dec..7206ae9702c0 100644 --- a/numpy/random/_philox.pyi +++ b/numpy/random/_philox.pyi @@ -1,14 +1,16 @@ -from typing import TypedDict +from typing import TypedDict, type_check_only from numpy import uint64 from numpy.typing import NDArray from numpy.random.bit_generator import BitGenerator, SeedSequence from numpy._typing import _ArrayLikeInt_co +@type_check_only class _PhiloxInternal(TypedDict): counter: NDArray[uint64] key: NDArray[uint64] +@type_check_only class _PhiloxState(TypedDict): bit_generator: str state: _PhiloxInternal diff --git a/numpy/random/_sfc64.pyi b/numpy/random/_sfc64.pyi index 09ea41139789..baaae7c668fb 100644 --- a/numpy/random/_sfc64.pyi +++ b/numpy/random/_sfc64.pyi @@ -1,12 +1,14 @@ -from typing import TypedDict +from typing import TypedDict, type_check_only from numpy import uint64 from numpy.random.bit_generator import BitGenerator, SeedSequence from numpy._typing import NDArray, _ArrayLikeInt_co +@type_check_only class _SFC64Internal(TypedDict): state: NDArray[uint64] +@type_check_only class _SFC64State(TypedDict): bit_generator: str state: _SFC64Internal diff --git a/numpy/random/bit_generator.pyi b/numpy/random/bit_generator.pyi index d99278e861ea..8dfbcd9909dd 100644 --- a/numpy/random/bit_generator.pyi +++ b/numpy/random/bit_generator.pyi @@ -4,10 +4,12 @@ from collections.abc import Callable, Mapping, Sequence from typing import ( Any, NamedTuple, + TypeAlias, TypedDict, TypeVar, overload, Literal, + type_check_only, ) from numpy import dtype, uint32, uint64 @@ -22,25 +24,27 @@ from numpy._typing import ( _T = TypeVar("_T") -_DTypeLikeUint32 = ( +_DTypeLikeUint32: TypeAlias = ( dtype[uint32] | _SupportsDType[dtype[uint32]] | type[uint32] | _UInt32Codes ) -_DTypeLikeUint64 = ( +_DTypeLikeUint64: TypeAlias = ( dtype[uint64] | _SupportsDType[dtype[uint64]] | type[uint64] | _UInt64Codes ) +@type_check_only class _SeedSeqState(TypedDict): entropy: None | int | Sequence[int] spawn_key: tuple[int, ...] 
pool_size: int n_children_spawned: int +@type_check_only class _Interface(NamedTuple): state_address: Any state: Any diff --git a/numpy/random/c_distributions.pxd b/numpy/random/c_distributions.pxd index b978d13503ea..da790ca499df 100644 --- a/numpy/random/c_distributions.pxd +++ b/numpy/random/c_distributions.pxd @@ -1,4 +1,3 @@ -#!python #cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3 from numpy cimport npy_intp diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index dbd3cd609495..16a722c0038e 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -5,7 +5,6 @@ from typing import Any, overload, Literal import numpy as np from numpy import ( dtype, - float32, float64, int8, int16, @@ -26,12 +25,7 @@ from numpy._typing import ( NDArray, _ArrayLikeFloat_co, _ArrayLikeInt_co, - _DoubleCodes, _DTypeLikeBool, - _DTypeLikeInt, - _DTypeLikeUInt, - _Float32Codes, - _Float64Codes, _Int8Codes, _Int16Codes, _Int32Codes, @@ -39,7 +33,6 @@ from numpy._typing import ( _IntCodes, _LongCodes, _ShapeLike, - _SingleCodes, _SupportsDType, _UInt8Codes, _UInt16Codes, @@ -49,22 +42,6 @@ from numpy._typing import ( _ULongCodes, ) -_DTypeLikeFloat32 = ( - dtype[float32] - | _SupportsDType[dtype[float32]] - | type[float32] - | _Float32Codes - | _SingleCodes -) - -_DTypeLikeFloat64 = ( - dtype[float64] - | _SupportsDType[dtype[float64]] - | type[float] - | type[float64] - | _Float64Codes - | _DoubleCodes -) class RandomState: _bit_generator: BitGenerator diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index b42b0a7764b8..26d0f5f4d1a4 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -304,7 +304,7 @@ cdef class RandomState: st['gauss'] = self._aug_state.gauss if legacy and not isinstance(self._bit_generator, _MT19937): raise ValueError( - "legacy can only be True when the underlyign bitgenerator is " + "legacy can only be True when the underlying bitgenerator is " "an instance of MT19937." ) if legacy: @@ -718,8 +718,6 @@ cdef class RandomState: Desired dtype of the result. Byteorder must be native. The default value is long. - .. versionadded:: 1.11.0 - .. warning:: This function defaults to the C-long dtype, which is 32bit on windows and otherwise 64bit on 64bit platforms (and 32bit on 32bit ones). @@ -861,8 +859,6 @@ cdef class RandomState: Generates a random sample from a given 1-D array - .. versionadded:: 1.7.0 - .. note:: New code should use the `~numpy.random.Generator.choice` method of a `~numpy.random.Generator` instance instead; @@ -1138,7 +1134,7 @@ cdef class RandomState: >>> x = np.float32(5*0.99999999) >>> x - 5.0 + np.float32(5.0) Examples @@ -1551,7 +1547,7 @@ cdef class RandomState: >>> mu, sigma = 0, 0.1 # mean and standard deviation >>> s = np.random.normal(mu, sigma, 1000) - Verify the mean and the variance: + Verify the mean and the standard deviation: >>> abs(mu - np.mean(s)) 0.0 # may vary @@ -1864,9 +1860,6 @@ cdef class RandomState: ---------- dfnum : float or array_like of floats Numerator degrees of freedom, must be > 0. - - .. versionchanged:: 1.14.0 - Earlier NumPy versions required dfnum > 1. dfden : float or array_like of floats Denominator degrees of freedom, must be > 0. nonc : float or array_like of floats @@ -1976,7 +1969,7 @@ cdef class RandomState: The variable obtained by summing the squares of `df` independent, standard normally distributed random variables: - .. math:: Q = \\sum_{i=0}^{\\mathtt{df}} X^2_i + .. 
math:: Q = \\sum_{i=1}^{\\mathtt{df}} X^2_i is chi-square distributed, denoted @@ -2025,9 +2018,6 @@ cdef class RandomState: ---------- df : float or array_like of floats Degrees of freedom, must be > 0. - - .. versionchanged:: 1.10.0 - Earlier NumPy versions required dfnum > 1. nonc : float or array_like of floats Non-centrality, must be non-negative. size : int or tuple of ints, optional @@ -2292,7 +2282,7 @@ cdef class RandomState: Draw samples from a von Mises distribution. Samples are drawn from a von Mises distribution with specified mode - (mu) and dispersion (kappa), on the interval [-pi, pi]. + (mu) and concentration (kappa), on the interval [-pi, pi]. The von Mises distribution (also known as the circular normal distribution) is a continuous probability distribution on the unit @@ -2309,7 +2299,7 @@ cdef class RandomState: mu : float or array_like of floats Mode ("center") of the distribution. kappa : float or array_like of floats - Dispersion of the distribution, has to be >=0. + Concentration of the distribution, has to be >=0. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples are drawn. If size is ``None`` (default), @@ -2333,7 +2323,7 @@ cdef class RandomState: .. math:: p(x) = \\frac{e^{\\kappa cos(x-\\mu)}}{2\\pi I_0(\\kappa)}, - where :math:`\\mu` is the mode and :math:`\\kappa` the dispersion, + where :math:`\\mu` is the mode and :math:`\\kappa` the concentration, and :math:`I_0(\\kappa)` is the modified Bessel function of order 0. The von Mises is named for Richard Edler von Mises, who was born in @@ -2354,7 +2344,7 @@ cdef class RandomState: -------- Draw samples from the distribution: - >>> mu, kappa = 0.0, 4.0 # mean and dispersion + >>> mu, kappa = 0.0, 4.0 # mean and concentration >>> s = np.random.vonmises(mu, kappa, 1000) Display the histogram of the samples, along with @@ -3416,7 +3406,7 @@ cdef class RandomState: Notes ----- - The probability density for the binomial distribution is + The probability mass function (PMF) for the binomial distribution is .. math:: P(N) = \\binom{n}{N}p^N(1-p)^{n-N}, @@ -3656,7 +3646,7 @@ cdef class RandomState: Notes ----- - The Poisson distribution + The probability mass function (PMF) of Poisson distribution is .. math:: f(k; \\lambda)=\\frac{\\lambda^k e^{-\\lambda}}{k!} @@ -3744,7 +3734,7 @@ cdef class RandomState: Notes ----- - The probability density for the Zipf distribution is + The probability mass function (PMF) for the Zipf distribution is .. math:: p(k) = \\frac{k^{-a}}{\\zeta(a)}, @@ -3908,7 +3898,7 @@ cdef class RandomState: Notes ----- - The probability density for the Hypergeometric distribution is + The probability mass function (PMF) for the Hypergeometric distribution is .. math:: P(x) = \\frac{\\binom{g}{x}\\binom{b}{n-x}}{\\binom{g+b}{n}}, @@ -4912,6 +4902,7 @@ def ranf(*args, **kwargs): return _rand.random_sample(*args, **kwargs) __all__ = [ + 'RandomState', 'beta', 'binomial', 'bytes', @@ -4964,5 +4955,18 @@ __all__ = [ 'wald', 'weibull', 'zipf', - 'RandomState', ] + +seed.__module__ = "numpy.random" +ranf.__module__ = "numpy.random" +sample.__module__ = "numpy.random" +get_bit_generator.__module__ = "numpy.random" +set_bit_generator.__module__ = "numpy.random" + +# The first item in __all__ is 'RandomState', so it can be skipped here. 
+for method_name in __all__[1:]: + method = getattr(RandomState, method_name, None) + if method is not None: + method.__module__ = "numpy.random" + +del method, method_name diff --git a/numpy/random/src/mt19937/randomkit.c b/numpy/random/src/mt19937/randomkit.c index e718c2d06cc8..32f40fa49cc1 100644 --- a/numpy/random/src/mt19937/randomkit.c +++ b/numpy/random/src/mt19937/randomkit.c @@ -135,7 +135,7 @@ #define RK_DEV_RANDOM "/dev/random" #endif -char *rk_strerror[RK_ERR_MAX] = {"no error", "random device unvavailable"}; +char *rk_strerror[RK_ERR_MAX] = {"no error", "random device unavailable"}; /* static functions */ static unsigned long rk_hash(unsigned long key); diff --git a/numpy/random/tests/test_direct.py b/numpy/random/tests/test_direct.py index 12c2f1d5ab57..3ef94b63ac59 100644 --- a/numpy/random/tests/test_direct.py +++ b/numpy/random/tests/test_direct.py @@ -538,7 +538,7 @@ def test_legacy_pickle(self): ) base_path = os.path.split(os.path.abspath(__file__))[0] - pkl_file = os.path.join(base_path, "data", f"sfc64_np126.pkl.gz") + pkl_file = os.path.join(base_path, "data", "sfc64_np126.pkl.gz") with gzip.open(pkl_file) as gz: sfc = pickle.load(gz) @@ -559,3 +559,22 @@ def test_passthrough(self): rg2 = default_rng(rg) assert rg2 is rg assert rg2.bit_generator is bg + + def test_coercion_RandomState_Generator(self): + # use default_rng to coerce RandomState to Generator + rs = RandomState(1234) + rg = default_rng(rs) + assert isinstance(rg.bit_generator, MT19937) + assert rg.bit_generator is rs._bit_generator + + # RandomState with a non MT19937 bit generator + _original = np.random.get_bit_generator() + bg = PCG64(12342298) + np.random.set_bit_generator(bg) + rs = np.random.mtrand._rand + rg = default_rng(rs) + assert rg.bit_generator is bg + + # vital to get global state back to original, otherwise + # other tests start to fail. 
+ np.random.set_bit_generator(_original) diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py index 791fbaba9850..d6ffea0b2dbf 100644 --- a/numpy/random/tests/test_extending.py +++ b/numpy/random/tests/test_extending.py @@ -1,12 +1,10 @@ from importlib.util import spec_from_file_location, module_from_spec import os -import pathlib import pytest import shutil import subprocess import sys import sysconfig -import textwrap import warnings import numpy as np @@ -65,14 +63,23 @@ def test_cython(tmp_path): build_dir = tmp_path / 'random' / '_examples' / 'cython' target_dir = build_dir / "build" os.makedirs(target_dir, exist_ok=True) + # Ensure we use the correct Python interpreter even when `meson` is + # installed in a different Python environment (see gh-24956) + native_file = str(build_dir / 'interpreter-native-file.ini') + with open(native_file, 'w') as f: + f.write("[binaries]\n") + f.write(f"python = '{sys.executable}'\n") + f.write(f"python3 = '{sys.executable}'") if sys.platform == "win32": subprocess.check_call(["meson", "setup", - "--buildtype=release", - "--vsenv", str(build_dir)], + "--buildtype=release", + "--vsenv", "--native-file", native_file, + str(build_dir)], cwd=target_dir, ) else: - subprocess.check_call(["meson", "setup", str(build_dir)], + subprocess.check_call(["meson", "setup", + "--native-file", native_file, str(build_dir)], cwd=target_dir ) subprocess.check_call(["meson", "compile", "-vv"], cwd=target_dir) @@ -83,7 +90,7 @@ def test_cython(tmp_path): g = glob.glob(str(target_dir / "*" / "extending.pyx.c")) with open(g[0]) as fid: txt_to_find = 'NumPy API declarations from "numpy/__init__' - for i, line in enumerate(fid): + for line in fid: if txt_to_find in line: break else: diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index 514f9af2ce8c..c9dc81e96a37 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -1244,7 +1244,7 @@ def test_dirichlet_small_alpha(self): @pytest.mark.slow def test_dirichlet_moderately_small_alpha(self): # Use alpha.max() < 0.1 to trigger stick breaking code path - alpha = np.array([0.02, 0.04, 0.03]) + alpha = np.array([0.02, 0.04]) exact_mean = alpha / alpha.sum() random = Generator(MT19937(self.seed)) sample = random.dirichlet(alpha, size=20000000) diff --git a/numpy/rec/__init__.pyi b/numpy/rec/__init__.pyi index 776db577cf9c..605770f7c9c0 100644 --- a/numpy/rec/__init__.pyi +++ b/numpy/rec/__init__.pyi @@ -1,13 +1,22 @@ from numpy._core.records import ( - record as record, - recarray as recarray, - format_parser as format_parser, - fromarrays as fromarrays, - fromrecords as fromrecords, - fromstring as fromstring, - fromfile as fromfile, - array as array + record, + recarray, + find_duplicate, + format_parser, + fromarrays, + fromrecords, + fromstring, + fromfile, + array, ) - -__all__: list[str] -__path__: list[str] +__all__ = [ + "record", + "recarray", + "format_parser", + "fromarrays", + "fromrecords", + "fromstring", + "fromfile", + "array", + "find_duplicate", +] diff --git a/numpy/strings/__init__.pyi b/numpy/strings/__init__.pyi index 927b0c9bd415..fb03e9c8b5e6 100644 --- a/numpy/strings/__init__.pyi +++ b/numpy/strings/__init__.pyi @@ -1,53 +1,95 @@ from numpy._core.strings import ( - equal as equal, - not_equal as not_equal, - greater_equal as greater_equal, - less_equal as less_equal, - greater as greater, - less as less, - add as add, - multiply as multiply, - mod as mod, - 
isalpha as isalpha, - isalnum as isalnum, - isdigit as isdigit, - isspace as isspace, - isnumeric as isnumeric, - isdecimal as isdecimal, - islower as islower, - isupper as isupper, - istitle as istitle, - str_len as str_len, - find as find, - rfind as rfind, - index as index, - rindex as rindex, - count as count, - startswith as startswith, - endswith as endswith, - decode as decode, - encode as encode, - expandtabs as expandtabs, - center as center, - ljust as ljust, - rjust as rjust, - lstrip as lstrip, - rstrip as rstrip, - strip as strip, - zfill as zfill, - upper as upper, - lower as lower, - swapcase as swapcase, - capitalize as capitalize, - title as title, - replace as replace, - join as join, - split as split, - rsplit as rsplit, - splitlines as splitlines, - partition as partition, - rpartition as rpartition, - translate as translate, + equal, + not_equal, + greater_equal, + less_equal, + greater, + less, + add, + multiply, + mod, + isalpha, + isalnum, + isdigit, + isspace, + isnumeric, + isdecimal, + islower, + isupper, + istitle, + str_len, + find, + rfind, + index, + rindex, + count, + startswith, + endswith, + decode, + encode, + expandtabs, + center, + ljust, + rjust, + lstrip, + rstrip, + strip, + zfill, + upper, + lower, + swapcase, + capitalize, + title, + replace, + partition, + rpartition, + translate, ) -__all__: list[str] +__all__ = [ + "equal", + "not_equal", + "less", + "less_equal", + "greater", + "greater_equal", + "add", + "multiply", + "isalpha", + "isdigit", + "isspace", + "isalnum", + "islower", + "isupper", + "istitle", + "isdecimal", + "isnumeric", + "str_len", + "find", + "rfind", + "index", + "rindex", + "count", + "startswith", + "endswith", + "lstrip", + "rstrip", + "strip", + "replace", + "expandtabs", + "center", + "ljust", + "rjust", + "zfill", + "partition", + "rpartition", + "upper", + "lower", + "swapcase", + "capitalize", + "title", + "mod", + "decode", + "encode", + "translate", +] diff --git a/numpy/testing/__init__.pyi b/numpy/testing/__init__.pyi index 2e4f76471b7c..e47b8f9546c6 100644 --- a/numpy/testing/__init__.pyi +++ b/numpy/testing/__init__.pyi @@ -1,49 +1,98 @@ -from numpy._pytesttester import PytestTester +from unittest import TestCase -from unittest import ( - TestCase as TestCase, +from . 
import overrides +from ._private.utils import ( + NOGIL_BUILD, + IS_WASM, + IS_PYPY, + IS_PYSTON, + IS_MUSL, + IS_EDITABLE, + HAS_REFCOUNT, + HAS_LAPACK64, + assert_equal, + assert_almost_equal, + assert_approx_equal, + assert_array_equal, + assert_array_less, + assert_string_equal, + assert_array_almost_equal, + assert_raises, + build_err_msg, + decorate_methods, + jiffies, + memusage, + print_assert_equal, + rundocs, + runstring, + verbose, + measure, + assert_, + assert_array_almost_equal_nulp, + assert_raises_regex, + assert_array_max_ulp, + assert_warns, + assert_no_warnings, + assert_allclose, + IgnoreException, + clear_and_catch_warnings, + SkipTest, + KnownFailureException, + temppath, + tempdir, + suppress_warnings, + assert_array_compare, + assert_no_gc_cycles, + break_cycles, + check_support_sve, + run_threaded, ) -from numpy.testing._private.utils import ( - assert_equal as assert_equal, - assert_almost_equal as assert_almost_equal, - assert_approx_equal as assert_approx_equal, - assert_array_equal as assert_array_equal, - assert_array_less as assert_array_less, - assert_string_equal as assert_string_equal, - assert_array_almost_equal as assert_array_almost_equal, - assert_raises as assert_raises, - build_err_msg as build_err_msg, - decorate_methods as decorate_methods, - jiffies as jiffies, - memusage as memusage, - print_assert_equal as print_assert_equal, - rundocs as rundocs, - runstring as runstring, - verbose as verbose, - measure as measure, - assert_ as assert_, - assert_array_almost_equal_nulp as assert_array_almost_equal_nulp, - assert_raises_regex as assert_raises_regex, - assert_array_max_ulp as assert_array_max_ulp, - assert_warns as assert_warns, - assert_no_warnings as assert_no_warnings, - assert_allclose as assert_allclose, - IgnoreException as IgnoreException, - clear_and_catch_warnings as clear_and_catch_warnings, - SkipTest as SkipTest, - KnownFailureException as KnownFailureException, - temppath as temppath, - tempdir as tempdir, - IS_PYPY as IS_PYPY, - IS_PYSTON as IS_PYSTON, - HAS_REFCOUNT as HAS_REFCOUNT, - suppress_warnings as suppress_warnings, - assert_array_compare as assert_array_compare, - assert_no_gc_cycles as assert_no_gc_cycles, - break_cycles as break_cycles, - HAS_LAPACK64 as HAS_LAPACK64, -) - -__all__: list[str] -test: PytestTester +__all__ = [ + "assert_equal", + "assert_almost_equal", + "assert_approx_equal", + "assert_array_equal", + "assert_array_less", + "assert_string_equal", + "assert_array_almost_equal", + "assert_raises", + "build_err_msg", + "decorate_methods", + "jiffies", + "memusage", + "print_assert_equal", + "rundocs", + "runstring", + "verbose", + "measure", + "assert_", + "assert_array_almost_equal_nulp", + "assert_raises_regex", + "assert_array_max_ulp", + "assert_warns", + "assert_no_warnings", + "assert_allclose", + "IgnoreException", + "clear_and_catch_warnings", + "SkipTest", + "KnownFailureException", + "temppath", + "tempdir", + "IS_PYPY", + "HAS_REFCOUNT", + "IS_WASM", + "suppress_warnings", + "assert_array_compare", + "assert_no_gc_cycles", + "break_cycles", + "HAS_LAPACK64", + "IS_PYSTON", + "IS_MUSL", + "check_support_sve", + "NOGIL_BUILD", + "IS_EDITABLE", + "run_threaded", + "TestCase", + "overrides", +] diff --git a/numpy/testing/_private/__init__.pyi b/numpy/testing/_private/__init__.pyi new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/numpy/testing/_private/extbuild.py b/numpy/testing/_private/extbuild.py index 08cbb0564e67..4fd0d839f249 100644 --- a/numpy/testing/_private/extbuild.py +++ 
b/numpy/testing/_private/extbuild.py @@ -235,7 +235,7 @@ def build(cfile, outputfilename, compile_extra, link_extra, """)) if sys.platform == "win32": subprocess.check_call(["meson", "setup", - "--buildtype=release", + "--buildtype=release", "--vsenv", ".."], cwd=build_dir, ) @@ -245,7 +245,7 @@ def build(cfile, outputfilename, compile_extra, link_extra, ) subprocess.check_call(["meson", "compile"], cwd=build_dir) os.rename(str(build_dir / so_name) + ".dummy", cfile.parent / so_name) - + def get_so_suffix(): ret = sysconfig.get_config_var('EXT_SUFFIX') assert ret diff --git a/numpy/testing/_private/extbuild.pyi b/numpy/testing/_private/extbuild.pyi new file mode 100644 index 000000000000..609a45e79d16 --- /dev/null +++ b/numpy/testing/_private/extbuild.pyi @@ -0,0 +1,25 @@ +import pathlib +import types +from collections.abc import Sequence + +__all__ = ["build_and_import_extension", "compile_extension_module"] + +def build_and_import_extension( + modname: str, + functions: Sequence[tuple[str, str, str]], + *, + prologue: str = "", + build_dir: pathlib.Path | None = None, + include_dirs: Sequence[str] = [], + more_init: str = "", +) -> types.ModuleType: ... + +# +def compile_extension_module( + name: str, + builddir: pathlib.Path, + include_dirs: Sequence[str], + source_string: str, + libraries: Sequence[str] = [], + library_dirs: Sequence[str] = [], +) -> pathlib.Path: ... diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index f22df0ddaab8..01fe6327713c 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -4,6 +4,7 @@ """ import os import sys +import pathlib import platform import re import gc @@ -18,6 +19,8 @@ import pprint import sysconfig import concurrent.futures +import threading +import importlib.metadata import numpy as np from numpy._core import ( @@ -25,9 +28,11 @@ from numpy import isfinite, isnan, isinf import numpy.linalg._umath_linalg from numpy._utils import _rename_parameter +from numpy._core.tests._natype import pd_NA from io import StringIO + __all__ = [ 'assert_equal', 'assert_almost_equal', 'assert_approx_equal', 'assert_array_equal', 'assert_array_less', 'assert_string_equal', @@ -40,8 +45,8 @@ 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY', 'HAS_REFCOUNT', "IS_WASM", 'suppress_warnings', 'assert_array_compare', 'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON', - '_OLD_PROMOTION', 'IS_MUSL', '_SUPPORTS_SVE', 'NOGIL_BUILD', - 'IS_EDITABLE', 'run_threaded', + 'IS_MUSL', 'check_support_sve', 'NOGIL_BUILD', + 'IS_EDITABLE', 'IS_INSTALLED', 'NUMPY_ROOT', 'run_threaded', ] @@ -53,15 +58,43 @@ class KnownFailureException(Exception): KnownFailureTest = KnownFailureException # backwards compat verbose = 0 +NUMPY_ROOT = pathlib.Path(np.__file__).parent + +try: + np_dist = importlib.metadata.distribution('numpy') +except importlib.metadata.PackageNotFoundError: + IS_INSTALLED = IS_EDITABLE = False +else: + IS_INSTALLED = True + try: + if sys.version_info >= (3, 13): + IS_EDITABLE = np_dist.origin.dir_info.editable + else: + # Backport importlib.metadata.Distribution.origin + import json, types # noqa: E401 + origin = json.loads( + np_dist.read_text('direct_url.json') or '{}', + object_hook=lambda data: types.SimpleNamespace(**data), + ) + IS_EDITABLE = origin.dir_info.editable + except AttributeError: + IS_EDITABLE = False + + # spin installs numpy directly via meson, instead of using meson-python, and + # runs the module by setting PYTHONPATH. 
This is problematic because the + # resulting installation lacks the Python metadata (.dist-info), and numpy + # might already be installed on the environment, causing us to find its + # metadata, even though we are not actually loading that package. + # Work around this issue by checking if the numpy root matches. + if not IS_EDITABLE and np_dist.locate_file('numpy') != NUMPY_ROOT: + IS_INSTALLED = False + IS_WASM = platform.machine() in ["wasm32", "wasm64"] IS_PYPY = sys.implementation.name == 'pypy' IS_PYSTON = hasattr(sys, "pyston_version_info") -IS_EDITABLE = not bool(np.__path__) or 'editable' in np.__path__[0] HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64 -_OLD_PROMOTION = lambda: np._get_promotion_state() == 'legacy' - IS_MUSL = False # alternate way is # from packaging.tags import sys_tags @@ -102,14 +135,15 @@ def GetPerformanceAttributes(object, counter, instance=None, # thread's CPU usage is either 0 or 100). To read counters like this, # you should copy this function, but keep the counter open, and call # CollectQueryData() each time you need to know. - # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp (dead link) + # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp + #(dead link) # My older explanation for this was that the "AddCounter" process # forced the CPU to 100%, but the above makes more sense :) import win32pdh if format is None: format = win32pdh.PDH_FMT_LONG - path = win32pdh.MakeCounterPath( (machine, object, instance, None, - inum, counter)) + path = win32pdh.MakeCounterPath((machine, object, instance, None, + inum, counter)) hq = win32pdh.OpenQuery() try: hc = win32pdh.AddCounter(hq, path) @@ -167,7 +201,7 @@ def jiffies(_proc_pid_stat=f'/proc/{os.getpid()}/stat', _load_time=[]): l = f.readline().split(' ') return int(l[13]) except Exception: - return int(100*(time.time()-_load_time[0])) + return int(100 * (time.time() - _load_time[0])) else: # os.getpid is not in all platforms available. 
# Using time is safe but inaccurate, especially when process @@ -183,7 +217,7 @@ def jiffies(_load_time=[]): import time if not _load_time: _load_time.append(time.time()) - return int(100*(time.time()-_load_time[0])) + return int(100 * (time.time() - _load_time[0])) def build_err_msg(arrays, err_msg, header='Items are not equal:', @@ -191,7 +225,7 @@ def build_err_msg(arrays, err_msg, header='Items are not equal:', msg = ['\n' + header] err_msg = str(err_msg) if err_msg: - if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header): + if err_msg.find('\n') == -1 and len(err_msg) < 79 - len(header): msg = [msg[0] + ' ' + err_msg] else: msg.append(err_msg) @@ -466,7 +500,6 @@ def print_assert_equal(test_string, actual, desired): raise AssertionError(msg.getvalue()) -@np._no_nep50_warning() def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): """ Raises an AssertionError if two items are not equal up to desired @@ -593,7 +626,6 @@ def _build_err_msg(): raise AssertionError(_build_err_msg()) -@np._no_nep50_warning() def assert_approx_equal(actual, desired, significant=7, err_msg='', verbose=True): """ @@ -662,14 +694,14 @@ def assert_approx_equal(actual, desired, significant=7, err_msg='', # Normalized the numbers to be in range (-10.0,10.0) # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual)))))) with np.errstate(invalid='ignore'): - scale = 0.5*(np.abs(desired) + np.abs(actual)) + scale = 0.5 * (np.abs(desired) + np.abs(actual)) scale = np.power(10, np.floor(np.log10(scale))) try: - sc_desired = desired/scale + sc_desired = desired / scale except ZeroDivisionError: sc_desired = 0.0 try: - sc_actual = actual/scale + sc_actual = actual / scale except ZeroDivisionError: sc_actual = 0.0 msg = build_err_msg( @@ -690,11 +722,10 @@ def assert_approx_equal(actual, desired, significant=7, err_msg='', return except (TypeError, NotImplementedError): pass - if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)): + if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant - 1)): raise AssertionError(msg) -@np._no_nep50_warning() def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='', precision=6, equal_nan=True, equal_inf=True, *, strict=False, names=('ACTUAL', 'DESIRED')): @@ -856,27 +887,27 @@ def func_assert_same_pos(x, y, func=isnan, hasval='nan'): remarks.append( 'Max absolute difference among violations: ' + array2string(max_abs_error)) - + # note: this definition of relative error matches that one # used by assert_allclose (found in np.isclose) # Filter values where the divisor would be zero nonzero = np.bool(y != 0) nonzero_and_invalid = np.logical_and(invalids, nonzero) - + if all(~nonzero_and_invalid): max_rel_error = array(inf) else: nonzero_invalid_error = error[nonzero_and_invalid] broadcasted_y = np.broadcast_to(y, error.shape) nonzero_invalid_y = broadcasted_y[nonzero_and_invalid] - max_rel_error = max(nonzero_invalid_error + max_rel_error = max(nonzero_invalid_error / abs(nonzero_invalid_y)) - if getattr(error, 'dtype', object_) == object_: + if getattr(error, 'dtype', object_) == object_: remarks.append( 'Max relative difference among violations: ' + str(max_rel_error)) - else: + else: remarks.append( 'Max relative difference among violations: ' + array2string(max_rel_error)) @@ -1027,7 +1058,6 @@ def assert_array_equal(actual, desired, err_msg='', verbose=True, *, strict=strict) -@np._no_nep50_warning() @_rename_parameter(['x', 'y'], ['actual', 'desired'], dep_version='2.0.0') def 
assert_array_almost_equal(actual, desired, decimal=6, err_msg='', verbose=True): @@ -1380,22 +1410,25 @@ def rundocs(filename=None, raise_on_error=True): raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg)) -def check_support_sve(): +def check_support_sve(__cache=[]): """ gh-22982 """ - + + if __cache: + return __cache[0] + import subprocess cmd = 'lscpu' try: output = subprocess.run(cmd, capture_output=True, text=True) - return 'sve' in output.stdout - except OSError: - return False + result = 'sve' in output.stdout + except (OSError, subprocess.SubprocessError): + result = False + __cache.append(result) + return __cache[0] -_SUPPORTS_SVE = check_support_sve() - # # assert_raises and assert_raises_regex are taken from unittest. # @@ -1450,11 +1483,6 @@ def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs): args and keyword arguments kwargs. Alternatively, can be used as a context manager like `assert_raises`. - - Notes - ----- - .. versionadded:: 1.9.0 - """ __tracebackhide__ = True # Hide traceback for py.test return _d.assertRaisesRegex(exception_class, expected_regexp, *args, **kwargs) @@ -1550,7 +1578,7 @@ def measure(code_str, times=1, label=None): i += 1 exec(code, globs, locs) elapsed = jiffies() - elapsed - return 0.01*elapsed + return 0.01 * elapsed def _assert_valid_refcount(op): @@ -1564,7 +1592,7 @@ def _assert_valid_refcount(op): import gc import numpy as np - b = np.arange(100*100).reshape(100, 100) + b = np.arange(100 * 100).reshape(100, 100) c = b i = 1 @@ -1595,8 +1623,6 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, that ``allclose`` has different default values). It compares the difference between `actual` and `desired` to ``atol + rtol * abs(desired)``. - .. versionadded:: 1.5.0 - Parameters ---------- actual : array_like @@ -1744,7 +1770,7 @@ def assert_array_almost_equal_nulp(x, y, nulp=1): ax = np.abs(x) ay = np.abs(y) ref = nulp * np.spacing(np.where(ax > ay, ax, ay)) - if not np.all(np.abs(x-y) <= ref): + if not np.all(np.abs(x - y) <= ref): if np.iscomplexobj(x) or np.iscomplexobj(y): msg = f"Arrays are not equal to {nulp} ULP" else: @@ -1860,7 +1886,7 @@ def nulp_diff(x, y, dtype=None): (x.shape, y.shape)) def _diff(rx, ry, vdt): - diff = np.asarray(rx-ry, dtype=vdt) + diff = np.asarray(rx - ry, dtype=vdt) return np.abs(diff) rx = integer_repr(x) @@ -1924,8 +1950,6 @@ def assert_warns(warning_class, *args, **kwargs): The ability to be used as a context manager is new in NumPy v1.11.0. - .. versionadded:: 1.4.0 - Parameters ---------- warning_class : class @@ -1991,8 +2015,6 @@ def assert_no_warnings(*args, **kwargs): The ability to be used as a context manager is new in NumPy v1.11.0. - .. versionadded:: 1.7.0 - Parameters ---------- func : callable @@ -2532,8 +2554,6 @@ def assert_no_gc_cycles(*args, **kwargs): with assert_no_gc_cycles(): do_something() - .. 
versionadded:: 1.15.0 - Parameters ---------- func : callable @@ -2611,7 +2631,7 @@ def check_free_memory(free_bytes): except ValueError as exc: raise ValueError(f'Invalid environment variable {env_var}: {exc}') - msg = (f'{free_bytes/1e9} GB memory required, but environment variable ' + msg = (f'{free_bytes / 1e9} GB memory required, but environment variable ' f'NPY_AVAILABLE_MEM={env_value} set') else: mem_free = _get_mem_available() @@ -2622,7 +2642,9 @@ def check_free_memory(free_bytes): "the test.") mem_free = -1 else: - msg = f'{free_bytes/1e9} GB memory required, but {mem_free/1e9} GB available' + free_bytes_gb = free_bytes / 1e9 + mem_free_gb = mem_free / 1e9 + msg = f'{free_bytes_gb} GB memory required, but {mem_free_gb} GB available' return msg if mem_free < free_bytes else None @@ -2700,12 +2722,38 @@ def _get_glibc_version(): _glibc_older_than = lambda x: (_glibcver != '0.0' and _glibcver < x) -def run_threaded(func, iters, pass_count=False): +def run_threaded(func, max_workers=8, pass_count=False, + pass_barrier=False, outer_iterations=1, + prepare_args=None): """Runs a function many times in parallel""" - with concurrent.futures.ThreadPoolExecutor(max_workers=8) as tpe: - if pass_count: - futures = [tpe.submit(func, i) for i in range(iters)] - else: - futures = [tpe.submit(func) for _ in range(iters)] - for f in futures: - f.result() + for _ in range(outer_iterations): + with (concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) + as tpe): + if prepare_args is None: + args = [] + else: + args = prepare_args() + if pass_barrier: + barrier = threading.Barrier(max_workers) + args.append(barrier) + if pass_count: + all_args = [(func, i, *args) for i in range(max_workers)] + else: + all_args = [(func, *args) for i in range(max_workers)] + try: + futures = [] + for arg in all_args: + futures.append(tpe.submit(*arg)) + finally: + if len(futures) < max_workers and pass_barrier: + barrier.abort() + for f in futures: + f.result() + + +def get_stringdtype_dtype(na_object, coerce=True): + # explicit is check for pd_NA because != with pd_NA returns pd_NA + if na_object is pd_NA or na_object != "unset": + return np.dtypes.StringDType(na_object=na_object, coerce=coerce) + else: + return np.dtypes.StringDType(coerce=coerce) diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index 113457ae1c55..b2f4045c7703 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -1,18 +1,19 @@ -import os import sys import ast import types import warnings import unittest -import contextlib -from re import Pattern +from _typeshed import GenericPath, StrOrBytesPath, StrPath from collections.abc import Callable, Iterable, Sequence +from contextlib import _GeneratorContextManager +from re import Pattern from typing import ( Literal as L, Any, AnyStr, ClassVar, NoReturn, + TypeAlias, overload, type_check_only, TypeVar, @@ -22,7 +23,7 @@ from typing import ( ) import numpy as np -from numpy import number, object_, _FloatValue +from numpy import number, object_, _ConvertibleToFloat from numpy._typing import ( NDArray, ArrayLike, @@ -33,9 +34,54 @@ from numpy._typing import ( _ArrayLikeDT64_co, ) -from unittest.case import ( - SkipTest as SkipTest, -) +from unittest.case import SkipTest + +__all__ = [ + "IS_EDITABLE", + "IS_MUSL", + "IS_PYPY", + "IS_PYSTON", + "IS_WASM", + "HAS_LAPACK64", + "HAS_REFCOUNT", + "NOGIL_BUILD", + "assert_", + "assert_array_almost_equal_nulp", + "assert_raises_regex", + "assert_array_max_ulp", + "assert_warns", + 
"assert_no_warnings", + "assert_allclose", + "assert_equal", + "assert_almost_equal", + "assert_approx_equal", + "assert_array_equal", + "assert_array_less", + "assert_string_equal", + "assert_array_almost_equal", + "assert_raises", + "build_err_msg", + "decorate_methods", + "jiffies", + "memusage", + "print_assert_equal", + "rundocs", + "runstring", + "verbose", + "measure", + "IgnoreException", + "clear_and_catch_warnings", + "SkipTest", + "KnownFailureException", + "temppath", + "tempdir", + "suppress_warnings", + "assert_array_compare", + "assert_no_gc_cycles", + "break_cycles", + "check_support_sve", + "run_threaded", +] _P = ParamSpec("_P") _T = TypeVar("_T") @@ -44,7 +90,7 @@ _FT = TypeVar("_FT", bound=Callable[..., Any]) # Must return a bool or an ndarray/generic type # that is supported by `np.logical_and.reduce` -_ComparisonFunc = Callable[ +_ComparisonFunc: TypeAlias = Callable[ [NDArray[Any], NDArray[Any]], ( bool @@ -54,12 +100,10 @@ _ComparisonFunc = Callable[ ) ] -__all__: list[str] - class KnownFailureException(Exception): ... class IgnoreException(Exception): ... -class clear_and_catch_warnings(warnings.catch_warnings): +class clear_and_catch_warnings(warnings.catch_warnings[list[warnings.WarningMessage]]): class_modules: ClassVar[tuple[types.ModuleType, ...]] modules: set[types.ModuleType] @overload @@ -127,10 +171,14 @@ class suppress_warnings: def __call__(self, func: _FT) -> _FT: ... verbose: int +IS_EDITABLE: Final[bool] +IS_MUSL: Final[bool] IS_PYPY: Final[bool] IS_PYSTON: Final[bool] +IS_WASM: Final[bool] HAS_REFCOUNT: Final[bool] HAS_LAPACK64: Final[bool] +NOGIL_BUILD: Final[bool] def assert_(val: object, msg: str | Callable[[], str] = ...) -> None: ... @@ -139,13 +187,13 @@ def assert_(val: object, msg: str | Callable[[], str] = ...) -> None: ... if sys.platform == "win32" or sys.platform == "cygwin": def memusage(processName: str = ..., instance: int = ...) -> int: ... elif sys.platform == "linux": - def memusage(_proc_pid_stat: str | bytes | os.PathLike[Any] = ...) -> None | int: ... + def memusage(_proc_pid_stat: StrOrBytesPath = ...) -> None | int: ... else: def memusage() -> NoReturn: ... if sys.platform == "linux": def jiffies( - _proc_pid_stat: str | bytes | os.PathLike[Any] = ..., + _proc_pid_stat: StrOrBytesPath = ..., _load_time: list[float] = ..., ) -> int: ... else: @@ -185,8 +233,8 @@ def assert_almost_equal( # Anything that can be coerced into `builtins.float` def assert_approx_equal( - actual: _FloatValue, - desired: _FloatValue, + actual: _ConvertibleToFloat, + desired: _ConvertibleToFloat, significant: int = ..., err_msg: object = ..., verbose: bool = ..., @@ -261,10 +309,12 @@ def runstring( def assert_string_equal(actual: str, desired: str) -> None: ... def rundocs( - filename: None | str | os.PathLike[str] = ..., + filename: StrPath | None = ..., raise_on_error: bool = ..., ) -> None: ... +def check_support_sve(__cache: list[_T]) -> _T: ... + def raises(*args: type[BaseException]) -> Callable[[_FT], _FT]: ... @overload @@ -350,9 +400,7 @@ def assert_array_max_ulp( ) -> NDArray[Any]: ... @overload -def assert_warns( - warning_class: type[Warning], -) -> contextlib._GeneratorContextManager[None]: ... +def assert_warns(warning_class: type[Warning]) -> _GeneratorContextManager[None]: ... @overload def assert_warns( warning_class: type[Warning], @@ -363,7 +411,7 @@ def assert_warns( ) -> _T: ... @overload -def assert_no_warnings() -> contextlib._GeneratorContextManager[None]: ... +def assert_no_warnings() -> _GeneratorContextManager[None]: ... 
@overload def assert_no_warnings( func: Callable[_P, _T], @@ -377,13 +425,13 @@ def tempdir( suffix: None = ..., prefix: None = ..., dir: None = ..., -) -> contextlib._GeneratorContextManager[str]: ... +) -> _GeneratorContextManager[str]: ... @overload def tempdir( - suffix: None | AnyStr = ..., - prefix: None | AnyStr = ..., - dir: None | AnyStr | os.PathLike[AnyStr] = ..., -) -> contextlib._GeneratorContextManager[AnyStr]: ... + suffix: AnyStr | None = ..., + prefix: AnyStr | None = ..., + dir: GenericPath[AnyStr] | None = ..., +) -> _GeneratorContextManager[AnyStr]: ... @overload def temppath( @@ -391,17 +439,17 @@ def temppath( prefix: None = ..., dir: None = ..., text: bool = ..., -) -> contextlib._GeneratorContextManager[str]: ... +) -> _GeneratorContextManager[str]: ... @overload def temppath( - suffix: None | AnyStr = ..., - prefix: None | AnyStr = ..., - dir: None | AnyStr | os.PathLike[AnyStr] = ..., + suffix: AnyStr | None = ..., + prefix: AnyStr | None = ..., + dir: GenericPath[AnyStr] | None = ..., text: bool = ..., -) -> contextlib._GeneratorContextManager[AnyStr]: ... +) -> _GeneratorContextManager[AnyStr]: ... @overload -def assert_no_gc_cycles() -> contextlib._GeneratorContextManager[None]: ... +def assert_no_gc_cycles() -> _GeneratorContextManager[None]: ... @overload def assert_no_gc_cycles( func: Callable[_P, Any], @@ -411,3 +459,5 @@ def assert_no_gc_cycles( ) -> None: ... def break_cycles() -> None: ... + +def run_threaded(func: Callable[[], None], iters: int, pass_count: bool = False) -> None: ... diff --git a/numpy/testing/overrides.py b/numpy/testing/overrides.py index 98bed23c4f45..9e61534c3236 100644 --- a/numpy/testing/overrides.py +++ b/numpy/testing/overrides.py @@ -22,7 +22,7 @@ def get_overridable_numpy_ufuncs(): ufuncs = {obj for obj in _umath.__dict__.values() if isinstance(obj, _ufunc)} return ufuncs - + def allows_array_ufunc_override(func): """Determine if a function can be overridden via `__array_ufunc__` @@ -44,7 +44,7 @@ def allows_array_ufunc_override(func): will work correctly for ufuncs defined outside of Numpy. """ - return isinstance(func, np.ufunc) + return isinstance(func, _ufunc) def get_overridable_numpy_array_functions(): @@ -63,7 +63,7 @@ def get_overridable_numpy_array_functions(): """ # 'import numpy' doesn't import recfunctions, so make sure it's imported # so ufuncs defined there show up in the ufunc listing - from numpy.lib import recfunctions + from numpy.lib import recfunctions # noqa: F401 return _array_functions.copy() def allows_array_function_override(func): diff --git a/numpy/testing/overrides.pyi b/numpy/testing/overrides.pyi new file mode 100644 index 000000000000..3fefc3f350da --- /dev/null +++ b/numpy/testing/overrides.pyi @@ -0,0 +1,11 @@ +from collections.abc import Callable, Hashable +from typing import Any + +from typing_extensions import TypeIs + +import numpy as np + +def get_overridable_numpy_ufuncs() -> set[np.ufunc]: ... +def get_overridable_numpy_array_functions() -> set[Callable[..., Any]]: ... +def allows_array_ufunc_override(func: object) -> TypeIs[np.ufunc]: ... +def allows_array_function_override(func: Hashable) -> bool: ... 
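
Since `allows_array_ufunc_override` now reduces to an `isinstance(func, np.ufunc)` check (typed as `TypeIs[np.ufunc]` in the new stub), a short sanity check of the `numpy.testing.overrides` helpers follows — a sketch, assuming a NumPy version that ships this module:

import numpy as np
from numpy.testing import overrides

# Overridable ufuncs are the np.ufunc instances collected from the umath module.
ufuncs = overrides.get_overridable_numpy_ufuncs()
assert np.add in ufuncs
assert overrides.allows_array_ufunc_override(np.add)

# Plain callables fail the isinstance check and are rejected.
assert not overrides.allows_array_ufunc_override(len)
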
diff --git a/numpy/testing/print_coercion_tables.pyi b/numpy/testing/print_coercion_tables.pyi new file mode 100644 index 000000000000..e6430304675e --- /dev/null +++ b/numpy/testing/print_coercion_tables.pyi @@ -0,0 +1,27 @@ +from collections.abc import Iterable +from typing import ClassVar, Generic + +from typing_extensions import Self, TypeVar + +import numpy as np + +_VT_co = TypeVar("_VT_co", default=object, covariant=True) + +# undocumented +class GenericObject(Generic[_VT_co]): + dtype: ClassVar[np.dtype[np.object_]] = ... + v: _VT_co + + def __init__(self, /, v: _VT_co) -> None: ... + def __add__(self, other: object, /) -> Self: ... + def __radd__(self, other: object, /) -> Self: ... + +def print_cancast_table(ntypes: Iterable[str]) -> None: ... +def print_coercion_table( + ntypes: Iterable[str], + inputfirstvalue: int, + inputsecondvalue: int, + firstarray: bool, + use_promote_types: bool = False, +) -> None: ... +def print_new_cast_table(*, can_cast: bool = True, legacy: bool = False, flags: bool = False) -> None: ... diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 3983ec902356..df9fce8fd79a 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -1689,7 +1689,7 @@ def warn(category): with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") - warn(UserWarning) # should be supppressed + warn(UserWarning) # should be suppressed warn(RuntimeWarning) assert_equal(len(w), 1) @@ -1791,7 +1791,7 @@ def test_tempdir(): raised = False try: with tempdir() as tdir: - raise ValueError() + raise ValueError except ValueError: raised = True assert_(raised) @@ -1807,7 +1807,7 @@ def test_temppath(): raised = False try: with temppath() as fpath: - raise ValueError() + raise ValueError except ValueError: raised = True assert_(raised) diff --git a/numpy/tests/test_lazyloading.py b/numpy/tests/test_lazyloading.py index f31a4eab79d0..1298fadc5618 100644 --- a/numpy/tests/test_lazyloading.py +++ b/numpy/tests/test_lazyloading.py @@ -1,5 +1,4 @@ import sys -import importlib from importlib.util import LazyLoader, find_spec, module_from_spec import pytest @@ -27,7 +26,7 @@ def test_lazy_load(): np = module # test a subpackage import - from numpy.lib import recfunctions + from numpy.lib import recfunctions # noqa: F401 # test triggering the import of the package np.ndarray diff --git a/numpy/tests/test_numpy_config.py b/numpy/tests/test_numpy_config.py index 82c1ad70b930..0e225b2bd7b4 100644 --- a/numpy/tests/test_numpy_config.py +++ b/numpy/tests/test_numpy_config.py @@ -3,7 +3,7 @@ """ import numpy as np import pytest -from unittest.mock import Mock, patch +from unittest.mock import patch pytestmark = pytest.mark.skipif( not hasattr(np.__config__, "_built_with_meson"), @@ -28,7 +28,7 @@ def test_dict_mode(self): config = np.show_config(mode="dicts") assert isinstance(config, dict) - assert all([key in config for key in self.REQUIRED_CONFIG_KEYS]), ( + assert all(key in config for key in self.REQUIRED_CONFIG_KEYS), ( "Required key missing," " see index of `False` with `REQUIRED_CONFIG_KEYS`" ) diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index eb96560b9c9a..b25818c62d31 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -1,3 +1,4 @@ +import functools import sys import sysconfig import subprocess @@ -38,7 +39,6 @@ def test_numpy_namespace(): # We override dir to not show these members allowlist = { 'recarray': 'numpy.rec.recarray', - 
'show_config': 'numpy.__config__.show', } bad_results = check_dir(np) # pytest gives better error messages with the builtin assert than with @@ -536,7 +536,7 @@ def test_core_shims_coherence(): if ( member_name.startswith("_") or member_name in ["tests", "strings"] - or f"numpy.{member_name}" in PUBLIC_ALIASED_MODULES + or f"numpy.{member_name}" in PUBLIC_ALIASED_MODULES ): continue @@ -680,3 +680,131 @@ def test_functions_single_location(): del visited_functions, visited_modules, functions_original_paths assert len(duplicated_functions) == 0, duplicated_functions + + +def test___module___attribute(): + modules_queue = [np] + visited_modules = {np} + visited_functions = set() + incorrect_entries = [] + + while len(modules_queue) > 0: + module = modules_queue.pop() + for member_name in dir(module): + member = getattr(module, member_name) + # first check if we got a module + if ( + inspect.ismodule(member) and # it's a module + "numpy" in member.__name__ and # inside NumPy + not member_name.startswith("_") and # not private + "numpy._core" not in member.__name__ and # outside _core + # not in a skip module list + member_name not in [ + "char", "core", "ctypeslib", "f2py", "ma", "lapack_lite", + "mrecords", "testing", "tests", "polynomial", "typing", + "mtrand", "bit_generator", + ] and + member not in visited_modules # not visited yet + ): + modules_queue.append(member) + visited_modules.add(member) + elif ( + not inspect.ismodule(member) and + hasattr(member, "__name__") and + not member.__name__.startswith("_") and + member.__module__ != module.__name__ and + member not in visited_functions + ): + # skip ufuncs that are exported in np.strings as well + if member.__name__ in ( + "add", "equal", "not_equal", "greater", "greater_equal", + "less", "less_equal", + ) and module.__name__ == "numpy.strings": + continue + + # recarray and record are exported in np and np.rec + if ( + (member.__name__ == "recarray" and module.__name__ == "numpy") or + (member.__name__ == "record" and module.__name__ == "numpy.rec") + ): + continue + + # skip cdef classes + if member.__name__ in ( + "BitGenerator", "Generator", "MT19937", "PCG64", "PCG64DXSM", + "Philox", "RandomState", "SFC64", "SeedSequence", + ): + continue + + incorrect_entries.append( + dict( + Func=member.__name__, + actual=member.__module__, + expected=module.__name__, + ) + ) + visited_functions.add(member) + + if incorrect_entries: + assert len(incorrect_entries) == 0, incorrect_entries + + +def _check___qualname__(obj) -> bool: + qualname = obj.__qualname__ + name = obj.__name__ + module_name = obj.__module__ + assert name == qualname.split(".")[-1] + + module = sys.modules[module_name] + actual_obj = functools.reduce(getattr, qualname.split("."), module) + return ( + actual_obj is obj or + ( + # for bound methods check qualname match + module_name.startswith("numpy.random") and + actual_obj.__qualname__ == qualname + ) + ) + + +def test___qualname___attribute(): + modules_queue = [np] + visited_modules = {np} + visited_functions = set() + incorrect_entries = [] + + while len(modules_queue) > 0: + module = modules_queue.pop() + for member_name in dir(module): + member = getattr(module, member_name) + # first check if we got a module + if ( + inspect.ismodule(member) and # it's a module + "numpy" in member.__name__ and # inside NumPy + not member_name.startswith("_") and # not private + member_name not in [ + "f2py", "ma", "tests", "testing", "typing", + "bit_generator", "ctypeslib", "lapack_lite", + ] and # skip modules + "numpy._core" not 
in member.__name__ and # outside _core + member not in visited_modules # not visited yet + ): + modules_queue.append(member) + visited_modules.add(member) + elif ( + not inspect.ismodule(member) and + hasattr(member, "__name__") and + not member.__name__.startswith("_") and + not member_name.startswith("_") and + not _check___qualname__(member) and + member not in visited_functions + ): + incorrect_entries.append( + dict( + actual=member.__qualname__, expected=member.__name__, + ) + ) + visited_functions.add(member) + + if incorrect_entries: + assert len(incorrect_entries) == 0, incorrect_entries diff --git a/numpy/typing/mypy_plugin.py b/numpy/typing/mypy_plugin.py index 63f063ccc795..ce9b0d9582ad 100644 --- a/numpy/typing/mypy_plugin.py +++ b/numpy/typing/mypy_plugin.py @@ -33,11 +33,13 @@ from __future__ import annotations -from collections.abc import Iterable from typing import Final, TYPE_CHECKING, Callable import numpy as np +if TYPE_CHECKING: + from collections.abc import Iterable + try: import mypy.types from mypy.types import Type @@ -69,9 +71,10 @@ def _get_precision_dict() -> dict[str, str]: ("_NBitLongDouble", np.longdouble), ] ret = {} + module = "numpy._typing" for name, typ in names: n: int = 8 * typ().dtype.itemsize - ret[f'numpy._typing._nbit.{name}'] = f"numpy._{n}Bit" + ret[f'{module}._nbit.{name}'] = f"{module}._nbit_base._{n}Bit" return ret @@ -92,7 +95,6 @@ def _get_extended_precision_list() -> list[str]: ] return [i for i in extended_names if hasattr(np, i)] - def _get_c_intp_name() -> str: # Adapted from `np.core._internal._getintp_ctype` char = np.dtype('n').char @@ -113,7 +115,7 @@ def _get_c_intp_name() -> str: #: A list with the names of all extended precision `np.number` subclasses. _EXTENDED_PRECISION_LIST: Final = _get_extended_precision_list() -#: The name of the ctypes quivalent of `np.intp` +#: The name of the ctypes equivalent of `np.intp` _C_INTP: Final = _get_c_intp_name() diff --git a/numpy/typing/tests/data/fail/arithmetic.pyi b/numpy/typing/tests/data/fail/arithmetic.pyi index d6ff59fc4756..3d250c493cfb 100644 --- a/numpy/typing/tests/data/fail/arithmetic.pyi +++ b/numpy/typing/tests/data/fail/arithmetic.pyi @@ -10,7 +10,7 @@ td = np.timedelta64(0, "D") AR_b: npt.NDArray[np.bool] AR_u: npt.NDArray[np.uint32] AR_i: npt.NDArray[np.int64] -AR_f: npt.NDArray[np.float64] +AR_f: npt.NDArray[np.longdouble] AR_c: npt.NDArray[np.complex128] AR_m: npt.NDArray[np.timedelta64] AR_M: npt.NDArray[np.datetime64] diff --git a/numpy/typing/tests/data/fail/arrayprint.pyi b/numpy/typing/tests/data/fail/arrayprint.pyi index f8c8a3237816..486c11e79868 100644 --- a/numpy/typing/tests/data/fail/arrayprint.pyi +++ b/numpy/typing/tests/data/fail/arrayprint.pyi @@ -6,11 +6,11 @@ import numpy.typing as npt AR: npt.NDArray[np.float64] func1: Callable[[Any], str] -func2: Callable[[np.integer[Any]], str] +func2: Callable[[np.integer], str] -np.array2string(AR, style=None) # E: Unexpected keyword argument -np.array2string(AR, legacy="1.14") # E: incompatible type -np.array2string(AR, sign="*") # E: incompatible type -np.array2string(AR, floatmode="default") # E: incompatible type -np.array2string(AR, formatter={"A": func1}) # E: incompatible type -np.array2string(AR, formatter={"float": func2}) # E: Incompatible types +np.array2string(AR, style=None) # E: No overload variant +np.array2string(AR, legacy="1.14") # E: No overload variant +np.array2string(AR, sign="*") # E: No overload variant +np.array2string(AR, floatmode="default") # E: No overload variant +np.array2string(AR, 
formatter={"A": func1}) # E: No overload variant +np.array2string(AR, formatter={"float": func2}) # E: No overload variant diff --git a/numpy/typing/tests/data/fail/chararray.pyi b/numpy/typing/tests/data/fail/chararray.pyi index d334f689d121..e484b644e4b8 100644 --- a/numpy/typing/tests/data/fail/chararray.pyi +++ b/numpy/typing/tests/data/fail/chararray.pyi @@ -1,8 +1,7 @@ import numpy as np -from typing import Any -AR_U: np.char.chararray[Any, np.dtype[np.str_]] -AR_S: np.char.chararray[Any, np.dtype[np.bytes_]] +AR_U: np.char.chararray[tuple[int, ...], np.dtype[np.str_]] +AR_S: np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]] AR_S.encode() # E: Invalid self argument AR_U.decode() # E: Invalid self argument diff --git a/numpy/typing/tests/data/fail/false_positives.pyi b/numpy/typing/tests/data/fail/false_positives.pyi deleted file mode 100644 index 7e79230663c2..000000000000 --- a/numpy/typing/tests/data/fail/false_positives.pyi +++ /dev/null @@ -1,11 +0,0 @@ -import numpy as np -import numpy.typing as npt - -AR_f8: npt.NDArray[np.float64] - -# NOTE: Mypy bug presumably due to the special-casing of heterogeneous tuples; -# xref numpy/numpy#20901 -# -# The expected output should be no different than, e.g., when using a -# list instead of a tuple -np.concatenate(([1], AR_f8)) # E: Argument 1 to "concatenate" has incompatible type diff --git a/numpy/typing/tests/data/fail/fromnumeric.pyi b/numpy/typing/tests/data/fail/fromnumeric.pyi index accddaf8c3bc..fb666986a7e0 100644 --- a/numpy/typing/tests/data/fail/fromnumeric.pyi +++ b/numpy/typing/tests/data/fail/fromnumeric.pyi @@ -6,6 +6,7 @@ import numpy.typing as npt A = np.array(True, ndmin=2, dtype=bool) A.setflags(write=False) AR_U: npt.NDArray[np.str_] +AR_M: npt.NDArray[np.datetime64] a = np.bool(True) @@ -88,6 +89,8 @@ np.trace(A, axis2=[]) # E: No overload variant np.ravel(a, order="bob") # E: No overload variant +np.nonzero(0) # E: No overload variant + np.compress( # E: No overload variant [True], A, axis=1.0 ) @@ -147,6 +150,7 @@ np.mean(a, axis=1.0) # E: No overload variant np.mean(a, out=False) # E: No overload variant np.mean(a, keepdims=1.0) # E: No overload variant np.mean(AR_U) # E: incompatible type +np.mean(AR_M) # E: incompatible type np.std(a, axis=1.0) # E: No overload variant np.std(a, out=False) # E: No overload variant diff --git a/numpy/typing/tests/data/fail/lib_function_base.pyi b/numpy/typing/tests/data/fail/lib_function_base.pyi index dccb3dbb0632..de4e56b07ba1 100644 --- a/numpy/typing/tests/data/fail/lib_function_base.pyi +++ b/numpy/typing/tests/data/fail/lib_function_base.pyi @@ -8,8 +8,10 @@ AR_c16: npt.NDArray[np.complex128] AR_m: npt.NDArray[np.timedelta64] AR_M: npt.NDArray[np.datetime64] AR_O: npt.NDArray[np.object_] +AR_b_list: list[npt.NDArray[np.bool]] -def func(a: int) -> None: ... +def fn_none_i(a: None, /) -> npt.NDArray[Any]: ... +def fn_ar_i(a: npt.NDArray[np.float64], posarg: int, /) -> npt.NDArray[Any]: ... 
 np.average(AR_m) # E: incompatible type
 np.select(1, [AR_f8]) # E: incompatible type
@@ -21,6 +23,15 @@ np.place(1, [True], 1.5) # E: incompatible type
 np.vectorize(1) # E: incompatible type
 np.place(AR_f8, slice(None), 5) # E: incompatible type

+np.piecewise(AR_f8, True, [fn_ar_i], 42) # E: No overload variant
+# TODO: enable these once mypy actually supports ParamSpec (released in 2021)
+# NOTE: pyright correctly reports errors for these (`reportCallIssue`)
+# np.piecewise(AR_f8, AR_b_list, [fn_none_i]) # E: No overload variant
+# np.piecewise(AR_f8, AR_b_list, [fn_ar_i]) # E: No overload variant
+# np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 3.14) # E: No overload variant
+# np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, None) # E: No overload variant
+# np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, _=None) # E: No overload variant
+
 np.interp(AR_f8, AR_c16, AR_f8) # E: incompatible type
 np.interp(AR_c16, AR_f8, AR_f8) # E: incompatible type
 np.interp(AR_f8, AR_f8, AR_f8, period=AR_c16) # E: No overload variant
diff --git a/numpy/typing/tests/data/fail/modules.pyi b/numpy/typing/tests/data/fail/modules.pyi
index c86627e0c8ea..541be15b24ae 100644
--- a/numpy/typing/tests/data/fail/modules.pyi
+++ b/numpy/typing/tests/data/fail/modules.pyi
@@ -13,6 +13,5 @@ np.math # E: Module has no attribute
 # e.g. one must first execute `import numpy.lib.recfunctions`
 np.lib.recfunctions # E: Module has no attribute

-np.__NUMPY_SETUP__ # E: Module has no attribute
 np.__deprecated_attrs__ # E: Module has no attribute
 np.__expired_functions__ # E: Module has no attribute
diff --git a/numpy/typing/tests/data/fail/npyio.pyi b/numpy/typing/tests/data/fail/npyio.pyi
index 95b6c426697c..6ba6a6be1797 100644
--- a/numpy/typing/tests/data/fail/npyio.pyi
+++ b/numpy/typing/tests/data/fail/npyio.pyi
@@ -12,7 +12,9 @@ AR_i8: npt.NDArray[np.int64]

 np.load(str_file) # E: incompatible type

-np.save(bytes_path, AR_i8) # E: incompatible type
+np.save(bytes_path, AR_i8) # E: No overload variant
+# https://github.com/python/mypy/issues/16111
+# np.save(str_path, AR_i8, fix_imports=True) # W: deprecated

 np.savez(bytes_path, AR_i8) # E: incompatible type
diff --git a/numpy/typing/tests/data/fail/scalars.pyi b/numpy/typing/tests/data/fail/scalars.pyi
index e65e111c3a65..e847d8d6c45a 100644
--- a/numpy/typing/tests/data/fail/scalars.pyi
+++ b/numpy/typing/tests/data/fail/scalars.pyi
@@ -28,7 +28,6 @@ np.float32(3j) # E: incompatible type

 np.float32([1.0, 0.0, 0.0]) # E: incompatible type
 np.complex64([]) # E: incompatible type
-np.complex64(1, 2) # E: Too many arguments

 # TODO: protocols (can't check for non-existent protocols w/ __getattr__)
 np.datetime64(0) # E: No overload variant
@@ -60,7 +59,7 @@ np.flexible(b"test") # E: Cannot instantiate abstract class
 np.float64(value=0.0) # E: Unexpected keyword argument
 np.int64(value=0) # E: Unexpected keyword argument
 np.uint64(value=0) # E: Unexpected keyword argument
-np.complex128(value=0.0j) # E: Unexpected keyword argument
+np.complex128(value=0.0j) # E: No overload variant
 np.str_(value='bob') # E: No overload variant
 np.bytes_(value=b'test') # E: No overload variant
 np.void(value=b'test') # E: No overload variant
@@ -82,8 +81,6 @@ def func(a: np.float32) -> None: ...
func(f2) # E: incompatible type func(f8) # E: incompatible type -round(c8) # E: No overload variant - c8.__getnewargs__() # E: Invalid self argument f2.__getnewargs__() # E: Invalid self argument f2.hex() # E: Invalid self argument diff --git a/numpy/typing/tests/data/fail/strings.pyi b/numpy/typing/tests/data/fail/strings.pyi index 66fcf6b23f5d..e284501c9d67 100644 --- a/numpy/typing/tests/data/fail/strings.pyi +++ b/numpy/typing/tests/data/fail/strings.pyi @@ -39,11 +39,6 @@ np.strings.partition(AR_S, "a") # E: incompatible type np.strings.rpartition(AR_U, b"a") # E: incompatible type np.strings.rpartition(AR_S, "a") # E: incompatible type -np.strings.split(AR_U, b"_") # E: incompatible type -np.strings.split(AR_S, "_") # E: incompatible type -np.strings.rsplit(AR_U, b"_") # E: incompatible type -np.strings.rsplit(AR_S, "_") # E: incompatible type - np.strings.count(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # E: incompatible type np.strings.count(AR_S, "a", 0, 9) # E: incompatible type diff --git a/numpy/typing/tests/data/fail/testing.pyi b/numpy/typing/tests/data/fail/testing.pyi index 803870e2fead..953670180203 100644 --- a/numpy/typing/tests/data/fail/testing.pyi +++ b/numpy/typing/tests/data/fail/testing.pyi @@ -23,6 +23,6 @@ np.testing.assert_array_max_ulp(AR_U, AR_U) # E: incompatible type np.testing.assert_warns(warning_class=RuntimeWarning, func=func) # E: No overload variant np.testing.assert_no_warnings(func=func) # E: No overload variant np.testing.assert_no_warnings(func, None) # E: Too many arguments -np.testing.assert_no_warnings(func, test=None) # E: Unexpected keyword argument +np.testing.assert_no_warnings(func, test=None) # E: No overload variant np.testing.assert_no_gc_cycles(func=func) # E: No overload variant diff --git a/numpy/typing/tests/data/mypy.ini b/numpy/typing/tests/data/mypy.ini index 7553012050c7..3bd7887c1209 100644 --- a/numpy/typing/tests/data/mypy.ini +++ b/numpy/typing/tests/data/mypy.ini @@ -5,3 +5,6 @@ implicit_reexport = False pretty = True disallow_any_unimported = True disallow_any_generics = True +; https://github.com/python/mypy/issues/15313 +disable_bytearray_promotion = true +disable_memoryview_promotion = true diff --git a/numpy/typing/tests/data/pass/arithmetic.py b/numpy/typing/tests/data/pass/arithmetic.py index 4ac4e957445c..93fda1d291c0 100644 --- a/numpy/typing/tests/data/pass/arithmetic.py +++ b/numpy/typing/tests/data/pass/arithmetic.py @@ -2,6 +2,7 @@ from typing import Any import numpy as np +import numpy.typing as npt import pytest c16 = np.complex128(1) @@ -57,14 +58,14 @@ def __rpow__(self, value: Any) -> Object: return self -AR_b: np.ndarray[Any, np.dtype[np.bool]] = np.array([True]) -AR_u: np.ndarray[Any, np.dtype[np.uint32]] = np.array([1], dtype=np.uint32) -AR_i: np.ndarray[Any, np.dtype[np.int64]] = np.array([1]) -AR_f: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.0]) -AR_c: np.ndarray[Any, np.dtype[np.complex128]] = np.array([1j]) -AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] = np.array([np.timedelta64(1, "D")]) -AR_M: np.ndarray[Any, np.dtype[np.datetime64]] = np.array([np.datetime64(1, "D")]) -AR_O: np.ndarray[Any, np.dtype[np.object_]] = np.array([Object()]) +AR_b: npt.NDArray[np.bool] = np.array([True]) +AR_u: npt.NDArray[np.uint32] = np.array([1], dtype=np.uint32) +AR_i: npt.NDArray[np.int64] = np.array([1]) +AR_f: npt.NDArray[np.float64] = np.array([1.0]) +AR_c: npt.NDArray[np.complex128] = np.array([1j]) +AR_m: npt.NDArray[np.timedelta64] = np.array([np.timedelta64(1, "D")]) +AR_M: npt.NDArray[np.datetime64] = 
np.array([np.datetime64(1, "D")]) +AR_O: npt.NDArray[np.object_] = np.array([Object()]) AR_LIKE_b = [True] AR_LIKE_u = [np.uint32(1)] diff --git a/numpy/typing/tests/data/pass/array_like.py b/numpy/typing/tests/data/pass/array_like.py index 822e6a1d4bed..730eb46d1c92 100644 --- a/numpy/typing/tests/data/pass/array_like.py +++ b/numpy/typing/tests/data/pass/array_like.py @@ -1,9 +1,11 @@ from __future__ import annotations -from typing import Any +from typing import Any, TYPE_CHECKING import numpy as np -from numpy._typing import NDArray, ArrayLike, _SupportsArray + +if TYPE_CHECKING: + from numpy._typing import NDArray, ArrayLike, _SupportsArray x1: ArrayLike = True x2: ArrayLike = 5 diff --git a/numpy/typing/tests/data/pass/lib_user_array.py b/numpy/typing/tests/data/pass/lib_user_array.py new file mode 100644 index 000000000000..62b7e85d7ff1 --- /dev/null +++ b/numpy/typing/tests/data/pass/lib_user_array.py @@ -0,0 +1,22 @@ +"""Based on the `if __name__ == "__main__"` test code in `lib/_user_array_impl.py`.""" + +from __future__ import annotations + +import numpy as np +from numpy.lib.user_array import container + +N = 10_000 +W = H = int(N**0.5) + +a: np.ndarray[tuple[int, int], np.dtype[np.int32]] +ua: container[tuple[int, int], np.dtype[np.int32]] + +a = np.arange(N, dtype=np.int32).reshape(W, H) +ua = container(a) + +ua_small: container[tuple[int, int], np.dtype[np.int32]] = ua[:3, :5] +ua_small[0, 0] = 10 + +ua_bool: container[tuple[int, int], np.dtype[np.bool]] = ua_small > 1 + +# shape: tuple[int, int] = np.shape(ua) diff --git a/numpy/typing/tests/data/pass/literal.py b/numpy/typing/tests/data/pass/literal.py index 5ef8122d1195..2238618eb67c 100644 --- a/numpy/typing/tests/data/pass/literal.py +++ b/numpy/typing/tests/data/pass/literal.py @@ -1,12 +1,14 @@ from __future__ import annotations -from typing import Any +from typing import Any, TYPE_CHECKING from functools import partial -from collections.abc import Callable import pytest import numpy as np +if TYPE_CHECKING: + from collections.abc import Callable + AR = np.array(0) AR.setflags(write=False) @@ -23,15 +25,16 @@ (KACF, AR.flatten), (KACF, AR.ravel), (KACF, partial(np.array, 1)), - (CF, partial(np.zeros, 1)), - (CF, partial(np.ones, 1)), - (CF, partial(np.empty, 1)), + # NOTE: __call__ is needed due to mypy 1.11 bugs (#17620, #17631) + (CF, partial(np.zeros.__call__, 1)), + (CF, partial(np.ones.__call__, 1)), + (CF, partial(np.empty.__call__, 1)), (CF, partial(np.full, 1, 1)), (KACF, partial(np.zeros_like, AR)), (KACF, partial(np.ones_like, AR)), (KACF, partial(np.empty_like, AR)), (KACF, partial(np.full_like, AR, 1)), - (KACF, partial(np.add, 1, 1)), # i.e. np.ufunc.__call__ + (KACF, partial(np.add.__call__, 1, 1)), # i.e. np.ufunc.__call__ (ACF, partial(np.reshape, AR, 1)), (KACF, partial(np.ravel, AR)), (KACF, partial(np.asarray, 1)), diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py index 7b8ebea52a16..fef9d519b78b 100644 --- a/numpy/typing/tests/data/pass/ndarray_misc.py +++ b/numpy/typing/tests/data/pass/ndarray_misc.py @@ -174,3 +174,10 @@ class SubClass(npt.NDArray[np.float64]): ... 
complex(np.array(1.0, dtype=np.float64)) operator.index(np.array(1, dtype=np.int64)) + +# this fails on numpy 2.2.1 +# https://github.com/scipy/scipy/blob/a755ee77ec47a64849abe42c349936475a6c2f24/scipy/io/arff/tests/test_arffread.py#L41-L44 +A_float = np.array([[1, 5], [2, 4], [np.nan, np.nan]]) +A_void: npt.NDArray[np.void] = np.empty(3, [("yop", float), ("yap", float)]) +A_void["yop"] = A_float[:, 0] +A_void["yap"] = A_float[:, 1] diff --git a/numpy/typing/tests/data/pass/nditer.py b/numpy/typing/tests/data/pass/nditer.py new file mode 100644 index 000000000000..25a5b44d7aec --- /dev/null +++ b/numpy/typing/tests/data/pass/nditer.py @@ -0,0 +1,4 @@ +import numpy as np + +arr = np.array([1]) +np.nditer([arr, None]) diff --git a/numpy/typing/tests/data/pass/numeric.py b/numpy/typing/tests/data/pass/numeric.py index 7f8f92973901..4e12fb5d70e6 100644 --- a/numpy/typing/tests/data/pass/numeric.py +++ b/numpy/typing/tests/data/pass/numeric.py @@ -6,6 +6,7 @@ """ from __future__ import annotations +from typing import cast import numpy as np import numpy.typing as npt @@ -15,7 +16,10 @@ class SubClass(npt.NDArray[np.float64]): i8 = np.int64(1) -A = np.arange(27).reshape(3, 3, 3) +A = cast( + np.ndarray[tuple[int, int, int], np.dtype[np.intp]], + np.arange(27).reshape(3, 3, 3), +) B: list[list[list[int]]] = A.tolist() C = np.empty((27, 27)).view(SubClass) diff --git a/numpy/typing/tests/data/pass/random.py b/numpy/typing/tests/data/pass/random.py index 69afb28c48ec..bce204a7378e 100644 --- a/numpy/typing/tests/data/pass/random.py +++ b/numpy/typing/tests/data/pass/random.py @@ -1493,5 +1493,5 @@ random_st.tomaxint(1) random_st.tomaxint((1,)) -np.random.set_bit_generator(SEED_PCG64) -np.random.get_bit_generator() +np.random.mtrand.set_bit_generator(SEED_PCG64) +np.random.mtrand.get_bit_generator() diff --git a/numpy/typing/tests/data/pass/recfunctions.py b/numpy/typing/tests/data/pass/recfunctions.py new file mode 100644 index 000000000000..03322e064be4 --- /dev/null +++ b/numpy/typing/tests/data/pass/recfunctions.py @@ -0,0 +1,162 @@ +"""These tests are based on the doctests from `numpy/lib/recfunctions.py`.""" + +from typing import Any +from typing_extensions import assert_type + +import numpy as np +import numpy.typing as npt +from numpy.lib import recfunctions as rfn + + +def test_recursive_fill_fields() -> None: + a: npt.NDArray[np.void] = np.array( + [(1, 10.0), (2, 20.0)], + dtype=[("A", np.int64), ("B", np.float64)], + ) + b = np.zeros((int(3),), dtype=a.dtype) + out = rfn.recursive_fill_fields(a, b) + assert_type(out, np.ndarray[tuple[int], np.dtype[np.void]]) + + +def test_get_names() -> None: + names: tuple[str | Any, ...] + names = rfn.get_names(np.empty((1,), dtype=[("A", int)]).dtype) + names = rfn.get_names(np.empty((1,), dtype=[("A", int), ("B", float)]).dtype) + + adtype = np.dtype([("a", int), ("b", [("b_a", int), ("b_b", int)])]) + names = rfn.get_names(adtype) + + +def test_get_names_flat() -> None: + names: tuple[str, ...] 
+    names = rfn.get_names_flat(np.empty((1,), dtype=[("A", int)]).dtype)
+    names = rfn.get_names_flat(np.empty((1,), dtype=[("A", int), ("B", float)]).dtype)
+
+    adtype = np.dtype([("a", int), ("b", [("b_a", int), ("b_b", int)])])
+    names = rfn.get_names_flat(adtype)
+
+
+def test_flatten_descr() -> None:
+    ndtype = np.dtype([("a", "<i4"), ("b", [("ba", "<f8"), ("bb", "<i4")])])
+    assert_type(rfn.flatten_descr(ndtype), tuple[tuple[str, np.dtype[Any]], ...])
+
+
+def test_get_fieldstructure() -> None:
+    ndtype = np.dtype([
+        ("A", int),
+        ("B", [("B_A", int), ("B_B", [("B_B_A", int), ("B_B_B", int)])]),
+    ])
+    assert_type(rfn.get_fieldstructure(ndtype), dict[str, list[str]])
+
+
+def test_merge_arrays() -> None:
+    assert_type(
+        rfn.merge_arrays((
+            np.ones((int(2),), np.int_),
+            np.ones((int(3),), np.float64),
+        )),
+        np.recarray[tuple[int], np.dtype[np.void]],
+    )
+
+
+def test_drop_fields() -> None:
+    ndtype = [("a", np.int64), ("b", [("b_a", np.double), ("b_b", np.int64)])]
+    a = np.ones((int(3),), dtype=ndtype)
+
+    assert_type(
+        rfn.drop_fields(a, "a"),
+        np.ndarray[tuple[int], np.dtype[np.void]],
+    )
+    assert_type(
+        rfn.drop_fields(a, "a", asrecarray=True),
+        np.rec.recarray[tuple[int], np.dtype[np.void]],
+    )
+    assert_type(
+        rfn.rec_drop_fields(a, "a"),
+        np.rec.recarray[tuple[int], np.dtype[np.void]],
+    )
+
+
+def test_rename_fields() -> None:
+    ndtype = [("a", np.int64), ("b", [("b_a", np.double), ("b_b", np.int64)])]
+    a = np.ones((int(3),), dtype=ndtype)
+
+    assert_type(
+        rfn.rename_fields(a, {"a": "A", "b_b": "B_B"}),
+        np.ndarray[tuple[int], np.dtype[np.void]],
+    )
+
+
+def test_repack_fields() -> None:
+    dt: np.dtype[np.void] = np.dtype("u1, <i8, <f8", align=True)
+    assert_type(rfn.repack_fields(dt), np.dtype[np.void])
+
+
+def test_structured_to_unstructured() -> None:
+    a = np.zeros(4, dtype=[("a", "i4"), ("b", "f4,u2"), ("c", "f4", 2)])
+    assert_type(rfn.structured_to_unstructured(a), npt.NDArray[Any])
+
+
+def test_unstructured_to_structured() -> None:
+    dt: np.dtype[np.void] = np.dtype([("a", "i4"), ("b", "f4,u2"), ("c", "f4", 2)])
+    a = np.arange(20, dtype=np.int32).reshape((4, 5))
+    assert_type(rfn.unstructured_to_structured(a, dt), npt.NDArray[np.void])
+
+
+def test_apply_along_fields() -> None:
+    b = np.ones(4, dtype=[("x", "i4"), ("y", "f4"), ("z", "f8")])
+    assert_type(
+        rfn.apply_along_fields(np.mean, b),
+        np.ndarray[tuple[int], np.dtype[np.void]],
+    )
+
+
+def test_assign_fields_by_name() -> None:
+    b = np.ones(4, dtype=[("x", "i4"), ("y", "f4"), ("z", "f8")])
+    assert_type(
+        rfn.apply_along_fields(np.mean, b),
+        np.ndarray[tuple[int], np.dtype[np.void]],
+    )
+
+
+def test_require_fields() -> None:
+    a = np.ones(4, dtype=[("a", "i4"), ("b", "f8"), ("c", "u1")])
+    assert_type(
+        rfn.require_fields(a, [("b", "f4"), ("c", "u1")]),
+        np.ndarray[tuple[int], np.dtype[np.void]],
+    )
+
+
+def test_stack_arrays() -> None:
+    x = np.zeros((int(2),), np.int32)
+    assert_type(
+        rfn.stack_arrays(x),
+        np.ndarray[tuple[int], np.dtype[np.int32]],
+    )
+
+    z = np.ones((int(2),), [("A", "|S3"), ("B", float)])
+    zz = np.ones((int(2),), [("A", "|S3"), ("B", np.float64), ("C", np.float64)])
+    assert_type(
+        rfn.stack_arrays((z, zz)),
+        np.ma.MaskedArray[tuple[int, ...], np.dtype[np.void]],
+    )
+
+
+def test_find_duplicates() -> None:
+    ndtype = np.dtype([("a", int)])
+
+    a = np.ma.ones(7, mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
+    assert_type(rfn.find_duplicates(a), np.ma.MaskedArray[Any, np.dtype[np.void]])
+    assert_type(
+        rfn.find_duplicates(a, ignoremask=True, return_index=True),
+        tuple[
+            np.ma.MaskedArray[Any, np.dtype[np.void]],
+            np.ndarray[Any, np.dtype[np.int_]],
+        ],
+    )
diff --git a/numpy/typing/tests/data/pass/scalars.py b/numpy/typing/tests/data/pass/scalars.py
index 53caf7ff817d..89f24cb92991 100644
--- a/numpy/typing/tests/data/pass/scalars.py
+++ 
b/numpy/typing/tests/data/pass/scalars.py @@ -1,4 +1,3 @@ -import sys import datetime as dt import pytest @@ -90,9 +89,18 @@ def __float__(self) -> float: np.datetime64("2019") np.datetime64(b"2019") np.datetime64("2019", "D") +np.datetime64("2019", "us") +np.datetime64("2019", "as") +np.datetime64(np.datetime64()) np.datetime64(np.datetime64()) np.datetime64(dt.datetime(2000, 5, 3)) +np.datetime64(dt.datetime(2000, 5, 3), "D") +np.datetime64(dt.datetime(2000, 5, 3), "us") +np.datetime64(dt.datetime(2000, 5, 3), "as") np.datetime64(dt.date(2000, 5, 3)) +np.datetime64(dt.date(2000, 5, 3), "D") +np.datetime64(dt.date(2000, 5, 3), "us") +np.datetime64(dt.date(2000, 5, 3), "as") np.datetime64(None) np.datetime64(None, "D") diff --git a/numpy/typing/tests/data/pass/shape.py b/numpy/typing/tests/data/pass/shape.py index 8e2e2faad9a8..ab1ae3d9bc79 100644 --- a/numpy/typing/tests/data/pass/shape.py +++ b/numpy/typing/tests/data/pass/shape.py @@ -1,7 +1,6 @@ -from typing import Any, NamedTuple +from typing import Any, NamedTuple, cast import numpy as np -from typing_extensions import assert_type # Subtype of tuple[int, int] @@ -9,7 +8,11 @@ class XYGrid(NamedTuple): x_axis: int y_axis: int -arr: np.ndarray[XYGrid, Any] = np.empty(XYGrid(2, 2)) +# TODO: remove this cast after: https://github.com/numpy/numpy/pull/27171 +arr: np.ndarray[XYGrid, Any] = cast( + np.ndarray[XYGrid, Any], + np.empty(XYGrid(2, 2)), +) # Test variance of _ShapeType_co def accepts_2d(a: np.ndarray[tuple[int, int], Any]) -> None: diff --git a/numpy/typing/tests/data/pass/simple.py b/numpy/typing/tests/data/pass/simple.py index 1337bd52860a..8f44e6e76f83 100644 --- a/numpy/typing/tests/data/pass/simple.py +++ b/numpy/typing/tests/data/pass/simple.py @@ -61,7 +61,7 @@ def iterable_func(x: Iterable[object]) -> Iterable[object]: iterable_func(array) -[element for element in array] +list(array) iter(array) zip(array, array) array[1] @@ -71,8 +71,13 @@ def iterable_func(x: Iterable[object]) -> Iterable[object]: array_2d = np.ones((3, 3)) array_2d[:2, :2] -array_2d[..., 0] array_2d[:2, :2] = 0 +array_2d[..., 0] +array_2d[..., 0] = 2 +array_2d[-1, -1] = None + +array_obj = np.zeros(1, dtype=np.object_) +array_obj[0] = slice(None) # Other special methods len(array) @@ -80,8 +85,7 @@ def iterable_func(x: Iterable[object]) -> Iterable[object]: array_scalar = np.array(1) int(array_scalar) float(array_scalar) -# currently does not work due to https://github.com/python/typeshed/issues/1904 -# complex(array_scalar) +complex(array_scalar) bytes(array_scalar) operator.index(array_scalar) bool(array_scalar) diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index 003affe02385..46ac003508c4 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -1,37 +1,46 @@ -import sys +import datetime as dt from typing import Any import numpy as np import numpy.typing as npt from numpy._typing import _32Bit,_64Bit, _128Bit -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +from typing_extensions import assert_type + +b: bool +c: complex +f: float +i: int + +c16: np.complex128 +c8: np.complex64 # Can't directly import `np.float128` as it is not available on all platforms f16: np.floating[_128Bit] +f8: np.float64 +f4: np.float32 -c16 = np.complex128() -f8 = np.float64() -i8 = np.int64() -u8 = np.uint64() +i8: np.int64 +i4: np.int32 -c8 = np.complex64() -f4 = np.float32() -i4 = 
np.int32() -u4 = np.uint32() +u8: np.uint64 +u4: np.uint32 -dt = np.datetime64(0, "D") -td = np.timedelta64(0, "D") +b_: np.bool -b_ = np.bool() +M8: np.datetime64 +M8_none: np.datetime64[None] +M8_date: np.datetime64[dt.date] +M8_time: np.datetime64[dt.datetime] +M8_int: np.datetime64[int] +date: dt.date +time: dt.datetime -b = bool() -c = complex() -f = float() -i = int() +m8: np.timedelta64 +m8_none: np.timedelta64[None] +m8_int: np.timedelta64[int] +m8_delta: np.timedelta64[dt.timedelta] +delta: dt.timedelta AR_b: npt.NDArray[np.bool] AR_u: npt.NDArray[np.uint32] @@ -42,6 +51,7 @@ AR_m: npt.NDArray[np.timedelta64] AR_M: npt.NDArray[np.datetime64] AR_O: npt.NDArray[np.object_] AR_number: npt.NDArray[np.number[Any]] +AR_Any: npt.NDArray[Any] AR_LIKE_b: list[bool] AR_LIKE_u: list[np.uint32] @@ -52,18 +62,19 @@ AR_LIKE_m: list[np.timedelta64] AR_LIKE_M: list[np.datetime64] AR_LIKE_O: list[np.object_] + # Array subtraction assert_type(AR_number - AR_number, npt.NDArray[np.number[Any]]) -assert_type(AR_b - AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_b - AR_LIKE_u, npt.NDArray[np.uint32]) assert_type(AR_b - AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_b - AR_LIKE_f, npt.NDArray[np.floating[Any]]) assert_type(AR_b - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) assert_type(AR_b - AR_LIKE_m, npt.NDArray[np.timedelta64]) assert_type(AR_b - AR_LIKE_O, Any) -assert_type(AR_LIKE_u - AR_b, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_LIKE_u - AR_b, npt.NDArray[np.uint32]) assert_type(AR_LIKE_i - AR_b, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_LIKE_f - AR_b, npt.NDArray[np.floating[Any]]) assert_type(AR_LIKE_c - AR_b, npt.NDArray[np.complexfloating[Any, Any]]) @@ -71,7 +82,7 @@ assert_type(AR_LIKE_m - AR_b, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_M - AR_b, npt.NDArray[np.datetime64]) assert_type(AR_LIKE_O - AR_b, Any) -assert_type(AR_u - AR_LIKE_b, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_u - AR_LIKE_b, npt.NDArray[np.uint32]) assert_type(AR_u - AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]]) assert_type(AR_u - AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_u - AR_LIKE_f, npt.NDArray[np.floating[Any]]) @@ -79,7 +90,7 @@ assert_type(AR_u - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) assert_type(AR_u - AR_LIKE_m, npt.NDArray[np.timedelta64]) assert_type(AR_u - AR_LIKE_O, Any) -assert_type(AR_LIKE_b - AR_u, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_LIKE_b - AR_u, npt.NDArray[np.uint32]) assert_type(AR_LIKE_u - AR_u, npt.NDArray[np.unsignedinteger[Any]]) assert_type(AR_LIKE_i - AR_u, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_LIKE_f - AR_u, npt.NDArray[np.floating[Any]]) @@ -88,7 +99,7 @@ assert_type(AR_LIKE_m - AR_u, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_M - AR_u, npt.NDArray[np.datetime64]) assert_type(AR_LIKE_O - AR_u, Any) -assert_type(AR_i - AR_LIKE_b, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_i - AR_LIKE_b, npt.NDArray[np.int64]) assert_type(AR_i - AR_LIKE_u, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_i - AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_i - AR_LIKE_f, npt.NDArray[np.floating[Any]]) @@ -96,7 +107,7 @@ assert_type(AR_i - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) assert_type(AR_i - AR_LIKE_m, npt.NDArray[np.timedelta64]) assert_type(AR_i - AR_LIKE_O, Any) -assert_type(AR_LIKE_b - AR_i, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_LIKE_b - AR_i, npt.NDArray[np.int64]) assert_type(AR_LIKE_u - 
AR_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_LIKE_i - AR_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_LIKE_f - AR_i, npt.NDArray[np.floating[Any]]) @@ -105,32 +116,32 @@ assert_type(AR_LIKE_m - AR_i, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_M - AR_i, npt.NDArray[np.datetime64]) assert_type(AR_LIKE_O - AR_i, Any) -assert_type(AR_f - AR_LIKE_b, npt.NDArray[np.floating[Any]]) -assert_type(AR_f - AR_LIKE_u, npt.NDArray[np.floating[Any]]) -assert_type(AR_f - AR_LIKE_i, npt.NDArray[np.floating[Any]]) -assert_type(AR_f - AR_LIKE_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_f - AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_f - AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_f - AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_f - AR_LIKE_f, npt.NDArray[np.float64]) assert_type(AR_f - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) assert_type(AR_f - AR_LIKE_O, Any) -assert_type(AR_LIKE_b - AR_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_u - AR_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_i - AR_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_f - AR_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_b - AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u - AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i - AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f - AR_f, npt.NDArray[np.float64]) assert_type(AR_LIKE_c - AR_f, npt.NDArray[np.complexfloating[Any, Any]]) assert_type(AR_LIKE_O - AR_f, Any) -assert_type(AR_c - AR_LIKE_b, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_c - AR_LIKE_u, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_c - AR_LIKE_i, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_c - AR_LIKE_f, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_c - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_c - AR_LIKE_b, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_u, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_i, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_f, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_c, npt.NDArray[np.complex128]) assert_type(AR_c - AR_LIKE_O, Any) -assert_type(AR_LIKE_b - AR_c, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_LIKE_u - AR_c, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_LIKE_i - AR_c, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_LIKE_f - AR_c, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_LIKE_c - AR_c, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_LIKE_b - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_u - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_i - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_f - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_c - AR_c, npt.NDArray[np.complex128]) assert_type(AR_LIKE_O - AR_c, Any) assert_type(AR_m - AR_LIKE_b, npt.NDArray[np.timedelta64]) @@ -177,53 +188,53 @@ assert_type(AR_LIKE_O - AR_O, Any) # Array floor division assert_type(AR_b // AR_LIKE_b, npt.NDArray[np.int8]) -assert_type(AR_b // AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_b // AR_LIKE_u, npt.NDArray[np.uint32]) assert_type(AR_b // AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_b // AR_LIKE_f, npt.NDArray[np.floating[Any]]) assert_type(AR_b // AR_LIKE_O, Any) assert_type(AR_LIKE_b // AR_b, npt.NDArray[np.int8]) -assert_type(AR_LIKE_u // AR_b, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_LIKE_u // AR_b, 
npt.NDArray[np.uint32]) assert_type(AR_LIKE_i // AR_b, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_LIKE_f // AR_b, npt.NDArray[np.floating[Any]]) assert_type(AR_LIKE_O // AR_b, Any) -assert_type(AR_u // AR_LIKE_b, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_u // AR_LIKE_b, npt.NDArray[np.uint32]) assert_type(AR_u // AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]]) assert_type(AR_u // AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_u // AR_LIKE_f, npt.NDArray[np.floating[Any]]) assert_type(AR_u // AR_LIKE_O, Any) -assert_type(AR_LIKE_b // AR_u, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_LIKE_b // AR_u, npt.NDArray[np.uint32]) assert_type(AR_LIKE_u // AR_u, npt.NDArray[np.unsignedinteger[Any]]) assert_type(AR_LIKE_i // AR_u, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_LIKE_f // AR_u, npt.NDArray[np.floating[Any]]) assert_type(AR_LIKE_m // AR_u, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_O // AR_u, Any) -assert_type(AR_i // AR_LIKE_b, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_i // AR_LIKE_b, npt.NDArray[np.int64]) assert_type(AR_i // AR_LIKE_u, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_i // AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_i // AR_LIKE_f, npt.NDArray[np.floating[Any]]) assert_type(AR_i // AR_LIKE_O, Any) -assert_type(AR_LIKE_b // AR_i, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_LIKE_b // AR_i, npt.NDArray[np.int64]) assert_type(AR_LIKE_u // AR_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_LIKE_i // AR_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_LIKE_f // AR_i, npt.NDArray[np.floating[Any]]) assert_type(AR_LIKE_m // AR_i, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_O // AR_i, Any) -assert_type(AR_f // AR_LIKE_b, npt.NDArray[np.floating[Any]]) -assert_type(AR_f // AR_LIKE_u, npt.NDArray[np.floating[Any]]) -assert_type(AR_f // AR_LIKE_i, npt.NDArray[np.floating[Any]]) -assert_type(AR_f // AR_LIKE_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_f // AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_f // AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_f // AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_f // AR_LIKE_f, npt.NDArray[np.float64]) assert_type(AR_f // AR_LIKE_O, Any) -assert_type(AR_LIKE_b // AR_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_u // AR_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_i // AR_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_f // AR_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_b // AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u // AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i // AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f // AR_f, npt.NDArray[np.float64]) assert_type(AR_LIKE_m // AR_f, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_O // AR_f, Any) @@ -263,7 +274,10 @@ assert_type(-i8, np.int64) assert_type(-i4, np.int32) assert_type(-u8, np.uint64) assert_type(-u4, np.uint32) -assert_type(-td, np.timedelta64) +assert_type(-m8, np.timedelta64) +assert_type(-m8_none, np.timedelta64[None]) +assert_type(-m8_int, np.timedelta64[int]) +assert_type(-m8_delta, np.timedelta64[dt.timedelta]) assert_type(-AR_f, npt.NDArray[np.float64]) assert_type(+f16, np.floating[_128Bit]) @@ -275,7 +289,9 @@ assert_type(+i8, np.int64) assert_type(+i4, np.int32) assert_type(+u8, np.uint64) assert_type(+u4, np.uint32) -assert_type(+td, np.timedelta64) +assert_type(+m8_none, np.timedelta64[None]) +assert_type(+m8_int, np.timedelta64[int]) +assert_type(+m8_delta, 
np.timedelta64[dt.timedelta]) assert_type(+AR_f, npt.NDArray[np.float64]) assert_type(abs(f16), np.floating[_128Bit]) @@ -287,33 +303,66 @@ assert_type(abs(i8), np.int64) assert_type(abs(i4), np.int32) assert_type(abs(u8), np.uint64) assert_type(abs(u4), np.uint32) -assert_type(abs(td), np.timedelta64) +assert_type(abs(m8), np.timedelta64) +assert_type(abs(m8_none), np.timedelta64[None]) +assert_type(abs(m8_int), np.timedelta64[int]) +assert_type(abs(m8_delta), np.timedelta64[dt.timedelta]) assert_type(abs(b_), np.bool) # Time structures -assert_type(dt + td, np.datetime64) -assert_type(dt + i, np.datetime64) -assert_type(dt + i4, np.datetime64) -assert_type(dt + i8, np.datetime64) -assert_type(dt - dt, np.timedelta64) -assert_type(dt - i, np.datetime64) -assert_type(dt - i4, np.datetime64) -assert_type(dt - i8, np.datetime64) - -assert_type(td + td, np.timedelta64) -assert_type(td + i, np.timedelta64) -assert_type(td + i4, np.timedelta64) -assert_type(td + i8, np.timedelta64) -assert_type(td - td, np.timedelta64) -assert_type(td - i, np.timedelta64) -assert_type(td - i4, np.timedelta64) -assert_type(td - i8, np.timedelta64) -assert_type(td / f, np.timedelta64) -assert_type(td / f4, np.timedelta64) -assert_type(td / f8, np.timedelta64) -assert_type(td / td, np.float64) -assert_type(td // td, np.int64) +assert_type(M8 + m8, np.datetime64) +assert_type(M8 + i, np.datetime64) +assert_type(M8 + i8, np.datetime64) +assert_type(M8 - M8, np.timedelta64) +assert_type(M8 - i, np.datetime64) +assert_type(M8 - i8, np.datetime64) + +assert_type(M8_none + m8, np.datetime64[None]) +assert_type(M8_none + i, np.datetime64[None]) +assert_type(M8_none + i8, np.datetime64[None]) +assert_type(M8_none - M8, np.timedelta64[None]) +assert_type(M8_none - m8, np.datetime64[None]) +assert_type(M8_none - i, np.datetime64[None]) +assert_type(M8_none - i8, np.datetime64[None]) + +assert_type(m8 + m8, np.timedelta64) +assert_type(m8 + i, np.timedelta64) +assert_type(m8 + i8, np.timedelta64) +assert_type(m8 - m8, np.timedelta64) +assert_type(m8 - i, np.timedelta64) +assert_type(m8 - i8, np.timedelta64) +assert_type(m8 * f, np.timedelta64) +assert_type(m8 * f4, np.timedelta64) +assert_type(m8 * np.True_, np.timedelta64) +assert_type(m8 / f, np.timedelta64) +assert_type(m8 / f4, np.timedelta64) +assert_type(m8 / m8, np.float64) +assert_type(m8 // m8, np.int64) +assert_type(m8 % m8, np.timedelta64) +assert_type(divmod(m8, m8), tuple[np.int64, np.timedelta64]) + +assert_type(m8_none + m8, np.timedelta64[None]) +assert_type(m8_none + i, np.timedelta64[None]) +assert_type(m8_none + i8, np.timedelta64[None]) +assert_type(m8_none - i, np.timedelta64[None]) +assert_type(m8_none - i8, np.timedelta64[None]) + +assert_type(m8_int + i, np.timedelta64[int]) +assert_type(m8_int + m8_delta, np.timedelta64[int]) +assert_type(m8_int + m8, np.timedelta64[int | None]) +assert_type(m8_int - i, np.timedelta64[int]) +assert_type(m8_int - m8_delta, np.timedelta64[int]) +assert_type(m8_int - m8, np.timedelta64[int | None]) + +assert_type(m8_delta + date, dt.date) +assert_type(m8_delta + time, dt.datetime) +assert_type(m8_delta + delta, dt.timedelta) +assert_type(m8_delta - delta, dt.timedelta) +assert_type(m8_delta / delta, float) +assert_type(m8_delta // delta, int) +assert_type(m8_delta % delta, dt.timedelta) +assert_type(divmod(m8_delta, delta), tuple[int, dt.timedelta]) # boolean @@ -349,168 +398,172 @@ assert_type(c8 / b_, np.complex64) # Complex -assert_type(c16 + f16, np.complexfloating[_64Bit | _128Bit, _64Bit | _128Bit]) 
+assert_type(c16 + f16, np.complex128 | np.complexfloating[_128Bit, _128Bit]) assert_type(c16 + c16, np.complex128) assert_type(c16 + f8, np.complex128) assert_type(c16 + i8, np.complex128) -assert_type(c16 + c8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) -assert_type(c16 + f4, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) -assert_type(c16 + i4, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(c16 + c8, np.complex128) +assert_type(c16 + f4, np.complex128) +assert_type(c16 + i4, np.complex128) assert_type(c16 + b_, np.complex128) assert_type(c16 + b, np.complex128) assert_type(c16 + c, np.complex128) assert_type(c16 + f, np.complex128) -assert_type(c16 + AR_f, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(c16 + AR_f, npt.NDArray[np.complex128]) -assert_type(f16 + c16, np.complexfloating[_64Bit | _128Bit, _64Bit | _128Bit]) +assert_type(f16 + c16, np.complex128 | np.complexfloating[_128Bit, _128Bit]) assert_type(c16 + c16, np.complex128) assert_type(f8 + c16, np.complex128) assert_type(i8 + c16, np.complex128) -assert_type(c8 + c16, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) -assert_type(f4 + c16, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) -assert_type(i4 + c16, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(c8 + c16, np.complex128 | np.complex64) +assert_type(f4 + c16, np.complex128 | np.complex64) +assert_type(i4 + c16, np.complex128) assert_type(b_ + c16, np.complex128) assert_type(b + c16, np.complex128) assert_type(c + c16, np.complex128) assert_type(f + c16, np.complex128) -assert_type(AR_f + c16, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_f + c16, npt.NDArray[np.complex128]) -assert_type(c8 + f16, np.complexfloating[_32Bit | _128Bit, _32Bit | _128Bit]) -assert_type(c8 + c16, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) -assert_type(c8 + f8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) -assert_type(c8 + i8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(c8 + f16, np.complexfloating[_32Bit, _32Bit] | np.complexfloating[_128Bit, _128Bit]) +assert_type(c8 + c16, np.complex64 | np.complex128) +assert_type(c8 + f8, np.complex64 | np.complex128) +assert_type(c8 + i8, np.complexfloating[_32Bit, _32Bit] | np.complexfloating[_64Bit, _64Bit]) assert_type(c8 + c8, np.complex64) assert_type(c8 + f4, np.complex64) assert_type(c8 + i4, np.complex64) assert_type(c8 + b_, np.complex64) assert_type(c8 + b, np.complex64) -assert_type(c8 + c, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) -assert_type(c8 + f, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) -assert_type(c8 + AR_f, npt.NDArray[np.complexfloating[Any, Any]]) - -assert_type(f16 + c8, np.complexfloating[_32Bit | _128Bit, _32Bit | _128Bit]) -assert_type(c16 + c8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) -assert_type(f8 + c8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) -assert_type(i8 + c8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit]) +assert_type(c8 + c, np.complex64 | np.complex128) +assert_type(c8 + f, np.complex64 | np.complex128) +assert_type(c8 + AR_f, npt.NDArray[np.complexfloating]) + +assert_type(f16 + c8, np.complexfloating[_128Bit, _128Bit] | np.complex64) +assert_type(c16 + c8, np.complex128) +assert_type(f8 + c8, np.complexfloating[_64Bit, _64Bit]) +assert_type(i8 + c8, np.complexfloating[_64Bit, _64Bit] | np.complex64) assert_type(c8 + c8, np.complex64) assert_type(f4 + c8, np.complex64) assert_type(i4 + c8, np.complex64) assert_type(b_ + c8, 
np.complex64)
 assert_type(b + c8, np.complex64)
-assert_type(c + c8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit])
-assert_type(f + c8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit])
-assert_type(AR_f + c8, npt.NDArray[np.complexfloating[Any, Any]])
+assert_type(c + c8, np.complex64 | np.complex128)
+assert_type(f + c8, np.complex64 | np.complex128)
+assert_type(AR_f + c8, npt.NDArray[np.complexfloating])

 # Float

-assert_type(f8 + f16, np.floating[_64Bit | _128Bit])
+assert_type(f8 + f16, np.float64 | np.floating[_128Bit])
 assert_type(f8 + f8, np.float64)
 assert_type(f8 + i8, np.float64)
-assert_type(f8 + f4, np.floating[_32Bit | _64Bit])
-assert_type(f8 + i4, np.floating[_32Bit | _64Bit])
+assert_type(f8 + f4, np.float64)
+assert_type(f8 + i4, np.float64)
 assert_type(f8 + b_, np.float64)
 assert_type(f8 + b, np.float64)
-assert_type(f8 + c, np.complex128)
+assert_type(f8 + c, np.float64 | np.complex128)
 assert_type(f8 + f, np.float64)
-assert_type(f8 + AR_f, npt.NDArray[np.floating[Any]])
+assert_type(f8 + AR_f, npt.NDArray[np.float64])

-assert_type(f16 + f8, np.floating[_64Bit | _128Bit])
+assert_type(f16 + f8, np.floating[_128Bit] | np.float64)
 assert_type(f8 + f8, np.float64)
 assert_type(i8 + f8, np.float64)
-assert_type(f4 + f8, np.floating[_32Bit | _64Bit])
-assert_type(i4 + f8, np.floating[_32Bit | _64Bit])
+assert_type(f4 + f8, np.float32 | np.float64)
+assert_type(i4 + f8, np.float64)
 assert_type(b_ + f8, np.float64)
 assert_type(b + f8, np.float64)
-assert_type(c + f8, np.complex128)
+assert_type(c + f8, np.complex128 | np.float64)
 assert_type(f + f8, np.float64)
-assert_type(AR_f + f8, npt.NDArray[np.floating[Any]])
+assert_type(AR_f + f8, npt.NDArray[np.float64])

-assert_type(f4 + f16, np.floating[_32Bit | _128Bit])
-assert_type(f4 + f8, np.floating[_32Bit | _64Bit])
-assert_type(f4 + i8, np.floating[_32Bit | _64Bit])
+assert_type(f4 + f16, np.float32 | np.floating[_128Bit])
+assert_type(f4 + f8, np.float32 | np.float64)
+assert_type(f4 + i8, np.float32 | np.floating[_64Bit])
 assert_type(f4 + f4, np.float32)
 assert_type(f4 + i4, np.float32)
 assert_type(f4 + b_, np.float32)
 assert_type(f4 + b, np.float32)
-assert_type(f4 + c, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit])
-assert_type(f4 + f, np.floating[_32Bit | _64Bit])
-assert_type(f4 + AR_f, npt.NDArray[np.floating[Any]])
+assert_type(f4 + c, np.complex64 | np.complex128)
+assert_type(f4 + f, np.float32 | np.float64)
+assert_type(f4 + AR_f, npt.NDArray[np.float64])

-assert_type(f16 + f4, np.floating[_32Bit | _128Bit])
-assert_type(f8 + f4, np.floating[_32Bit | _64Bit])
-assert_type(i8 + f4, np.floating[_32Bit | _64Bit])
+assert_type(f16 + f4, np.floating[_128Bit] | np.float32)
+assert_type(f8 + f4, np.float64)
+assert_type(i8 + f4, np.floating[_32Bit] | np.floating[_64Bit])
 assert_type(f4 + f4, np.float32)
 assert_type(i4 + f4, np.float32)
 assert_type(b_ + f4, np.float32)
 assert_type(b + f4, np.float32)
-assert_type(c + f4, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit])
-assert_type(f + f4, np.floating[_32Bit | _64Bit])
-assert_type(AR_f + f4, npt.NDArray[np.floating[Any]])
+assert_type(c + f4, np.complex64 | np.complex128)
+assert_type(f + f4, np.float64 | np.float32)
+assert_type(AR_f + f4, npt.NDArray[np.float64])

 # Int

 assert_type(i8 + i8, np.int64)
 assert_type(i8 + u8, Any)
-assert_type(i8 + i4, np.signedinteger[_32Bit | _64Bit])
+assert_type(i8 + i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit])
 assert_type(i8 + u4, Any)
 assert_type(i8 + b_, np.int64)
 assert_type(i8 + b, np.int64)
 assert_type(i8 + c, np.complex128)
 assert_type(i8 + f, np.float64)
-assert_type(i8 + AR_f, npt.NDArray[np.floating[Any]])
+assert_type(i8 + AR_f, npt.NDArray[np.float64])

 assert_type(u8 + u8, np.uint64)
 assert_type(u8 + i4, Any)
-assert_type(u8 + u4, np.unsignedinteger[_32Bit | _64Bit])
+assert_type(u8 + u4, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit])
 assert_type(u8 + b_, np.uint64)
 assert_type(u8 + b, np.uint64)
 assert_type(u8 + c, np.complex128)
 assert_type(u8 + f, np.float64)
-assert_type(u8 + AR_f, npt.NDArray[np.floating[Any]])
+assert_type(u8 + AR_f, npt.NDArray[np.float64])

 assert_type(i8 + i8, np.int64)
 assert_type(u8 + i8, Any)
-assert_type(i4 + i8, np.signedinteger[_32Bit | _64Bit])
+assert_type(i4 + i8, np.signedinteger[_32Bit] | np.signedinteger[_64Bit])
 assert_type(u4 + i8, Any)
 assert_type(b_ + i8, np.int64)
 assert_type(b + i8, np.int64)
 assert_type(c + i8, np.complex128)
 assert_type(f + i8, np.float64)
-assert_type(AR_f + i8, npt.NDArray[np.floating[Any]])
+assert_type(AR_f + i8, npt.NDArray[np.float64])

 assert_type(u8 + u8, np.uint64)
 assert_type(i4 + u8, Any)
-assert_type(u4 + u8, np.unsignedinteger[_32Bit | _64Bit])
+assert_type(u4 + u8, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit])
 assert_type(b_ + u8, np.uint64)
 assert_type(b + u8, np.uint64)
 assert_type(c + u8, np.complex128)
 assert_type(f + u8, np.float64)
-assert_type(AR_f + u8, npt.NDArray[np.floating[Any]])
+assert_type(AR_f + u8, npt.NDArray[np.float64])

-assert_type(i4 + i8, np.signedinteger[_32Bit | _64Bit])
+assert_type(i4 + i8, np.signedinteger[_32Bit] | np.signedinteger[_64Bit])
 assert_type(i4 + i4, np.int32)
 assert_type(i4 + b_, np.int32)
 assert_type(i4 + b, np.int32)
-assert_type(i4 + AR_f, npt.NDArray[np.floating[Any]])
+assert_type(i4 + AR_f, npt.NDArray[np.float64])

 assert_type(u4 + i8, Any)
 assert_type(u4 + i4, Any)
-assert_type(u4 + u8, np.unsignedinteger[_32Bit | _64Bit])
+assert_type(u4 + u8, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit])
 assert_type(u4 + u4, np.uint32)
 assert_type(u4 + b_, np.uint32)
 assert_type(u4 + b, np.uint32)
-assert_type(u4 + AR_f, npt.NDArray[np.floating[Any]])
+assert_type(u4 + AR_f, npt.NDArray[np.float64])

-assert_type(i8 + i4, np.signedinteger[_32Bit | _64Bit])
+assert_type(i8 + i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit])
 assert_type(i4 + i4, np.int32)
 assert_type(b_ + i4, np.int32)
 assert_type(b + i4, np.int32)
-assert_type(AR_f + i4, npt.NDArray[np.floating[Any]])
+assert_type(AR_f + i4, npt.NDArray[np.float64])

 assert_type(i8 + u4, Any)
 assert_type(i4 + u4, Any)
-assert_type(u8 + u4, np.unsignedinteger[_32Bit | _64Bit])
+assert_type(u8 + u4, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit])
 assert_type(u4 + u4, np.uint32)
 assert_type(b_ + u4, np.uint32)
 assert_type(b + u4, np.uint32)
-assert_type(AR_f + u4, npt.NDArray[np.floating[Any]])
+assert_type(AR_f + u4, npt.NDArray[np.float64])
+
+# Any
+
+assert_type(AR_Any + 2, npt.NDArray[Any])
diff --git a/numpy/typing/tests/data/reveal/array_api_info.pyi b/numpy/typing/tests/data/reveal/array_api_info.pyi
index b7dd2b934aec..e4110b7344e2 100644
--- a/numpy/typing/tests/data/reveal/array_api_info.pyi
+++ b/numpy/typing/tests/data/reveal/array_api_info.pyi
@@ -1,12 +1,8 @@
-import sys
 from typing import Literal

 import numpy as np

-if sys.version_info >= (3, 11):
-    from typing import Never, assert_type
-else:
-    from typing_extensions import Never, assert_type
+from typing_extensions import Never, assert_type

 info = np.__array_namespace_info__()
diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi
index 2559acbd0e94..c6d56ab0de2d 100644
--- a/numpy/typing/tests/data/reveal/array_constructors.pyi
+++ b/numpy/typing/tests/data/reveal/array_constructors.pyi
@@ -1,15 +1,12 @@
 import sys
-from typing import Any, TypeVar
+from typing import Any, Literal as L, TypeVar
 from pathlib import Path
 from collections import deque

 import numpy as np
 import numpy.typing as npt

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 _SCT = TypeVar("_SCT", bound=np.generic, covariant=True)

@@ -52,7 +49,7 @@ assert_type(np.empty([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64])
 assert_type(np.empty([1, 5, 6], dtype='c16'), npt.NDArray[Any])

 assert_type(np.concatenate(A), npt.NDArray[np.float64])
-assert_type(np.concatenate([A, A]), Any)
+assert_type(np.concatenate([A, A]), npt.NDArray[Any])
 assert_type(np.concatenate([[1], A]), npt.NDArray[Any])
 assert_type(np.concatenate([[1], [1]]), npt.NDArray[Any])
 assert_type(np.concatenate((A, A)), npt.NDArray[np.float64])
@@ -108,18 +105,18 @@ assert_type(np.frombuffer(A), npt.NDArray[np.float64])
 assert_type(np.frombuffer(A, dtype=np.int64), npt.NDArray[np.int64])
 assert_type(np.frombuffer(A, dtype="c16"), npt.NDArray[Any])

-assert_type(np.arange(False, True), npt.NDArray[np.signedinteger[Any]])
-assert_type(np.arange(10), npt.NDArray[np.signedinteger[Any]])
-assert_type(np.arange(0, 10, step=2), npt.NDArray[np.signedinteger[Any]])
-assert_type(np.arange(10.0), npt.NDArray[np.floating[Any]])
-assert_type(np.arange(start=0, stop=10.0), npt.NDArray[np.floating[Any]])
-assert_type(np.arange(np.timedelta64(0)), npt.NDArray[np.timedelta64])
-assert_type(np.arange(0, np.timedelta64(10)), npt.NDArray[np.timedelta64])
-assert_type(np.arange(np.datetime64("0"), np.datetime64("10")), npt.NDArray[np.datetime64])
-assert_type(np.arange(10, dtype=np.float64), npt.NDArray[np.float64])
-assert_type(np.arange(0, 10, step=2, dtype=np.int16), npt.NDArray[np.int16])
-assert_type(np.arange(10, dtype=int), npt.NDArray[Any])
-assert_type(np.arange(0, 10, dtype="f8"), npt.NDArray[Any])
+assert_type(np.arange(False, True), np.ndarray[tuple[int], np.dtype[np.signedinteger[Any]]])
+assert_type(np.arange(10), np.ndarray[tuple[int], np.dtype[np.signedinteger[Any]]])
+assert_type(np.arange(0, 10, step=2), np.ndarray[tuple[int], np.dtype[np.signedinteger[Any]]])
+assert_type(np.arange(10.0), np.ndarray[tuple[int], np.dtype[np.floating[Any]]])
+assert_type(np.arange(start=0, stop=10.0), np.ndarray[tuple[int], np.dtype[np.floating[Any]]])
+assert_type(np.arange(np.timedelta64(0)), np.ndarray[tuple[int], np.dtype[np.timedelta64]])
+assert_type(np.arange(0, np.timedelta64(10)), np.ndarray[tuple[int], np.dtype[np.timedelta64]])
+assert_type(np.arange(np.datetime64("0"), np.datetime64("10")), np.ndarray[tuple[int], np.dtype[np.datetime64]])
+assert_type(np.arange(10, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]])
+assert_type(np.arange(0, 10, step=2, dtype=np.int16), np.ndarray[tuple[int], np.dtype[np.int16]])
+assert_type(np.arange(10, dtype=int), np.ndarray[tuple[int], np.dtype[Any]])
+assert_type(np.arange(0, 10, dtype="f8"), np.ndarray[tuple[int], np.dtype[Any]])

 assert_type(np.require(A), npt.NDArray[np.float64])
 assert_type(np.require(B), SubClass[np.float64])
@@ -170,15 +167,30 @@ assert_type(np.full_like(A, i8, dtype=int), npt.NDArray[Any])
 assert_type(np.full_like(B, i8), SubClass[np.float64])
 assert_type(np.full_like(B, i8, dtype=np.int64), npt.NDArray[np.int64])

-assert_type(np.ones(1), npt.NDArray[np.float64])
-assert_type(np.ones([1, 1, 1]), npt.NDArray[np.float64])
-assert_type(np.ones(5, dtype=np.int64), npt.NDArray[np.int64])
-assert_type(np.ones(5, dtype=int), npt.NDArray[Any])
-
-assert_type(np.full(1, i8), npt.NDArray[Any])
-assert_type(np.full([1, 1, 1], i8), npt.NDArray[Any])
-assert_type(np.full(1, i8, dtype=np.float64), npt.NDArray[np.float64])
-assert_type(np.full(1, i8, dtype=float), npt.NDArray[Any])
+_size: int
+_shape_0d: tuple[()]
+_shape_1d: tuple[int]
+_shape_2d: tuple[int, int]
+_shape_nd: tuple[int, ...]
+_shape_like: list[int]
+
+assert_type(np.ones(_shape_0d), np.ndarray[tuple[()], np.dtype[np.float64]])
+assert_type(np.ones(_size), np.ndarray[tuple[int], np.dtype[np.float64]])
+assert_type(np.ones(_shape_2d), np.ndarray[tuple[int, int], np.dtype[np.float64]])
+assert_type(np.ones(_shape_nd), np.ndarray[tuple[int, ...], np.dtype[np.float64]])
+assert_type(np.ones(_shape_1d, dtype=np.int64), np.ndarray[tuple[int], np.dtype[np.int64]])
+assert_type(np.ones(_shape_like), npt.NDArray[np.float64])
+assert_type(np.ones(_shape_like, dtype=np.dtypes.Int64DType()), np.ndarray[Any, np.dtypes.Int64DType])
+assert_type(np.ones(_shape_like, dtype=int), npt.NDArray[Any])
+
+assert_type(np.full(_size, i8), np.ndarray[tuple[int], np.dtype[np.int64]])
+assert_type(np.full(_shape_2d, i8), np.ndarray[tuple[int, int], np.dtype[np.int64]])
+assert_type(np.full(_shape_like, i8), npt.NDArray[np.int64])
+assert_type(np.full(_shape_like, 42), npt.NDArray[Any])
+assert_type(np.full(_size, i8, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]])
+assert_type(np.full(_size, i8, dtype=float), np.ndarray[tuple[int], np.dtype[Any]])
+assert_type(np.full(_shape_like, 42, dtype=float), npt.NDArray[Any])
+assert_type(np.full(_shape_0d, i8, dtype=object), np.ndarray[tuple[()], np.dtype[Any]])

 assert_type(np.indices([1, 2, 3]), npt.NDArray[np.int_])
 assert_type(np.indices([1, 2, 3], sparse=True), tuple[npt.NDArray[np.int_], ...])
@@ -201,19 +213,19 @@ assert_type(np.atleast_2d(A, A), tuple[npt.NDArray[Any], ...])
 assert_type(np.atleast_3d(A), npt.NDArray[np.float64])
 assert_type(np.atleast_3d(A, A), tuple[npt.NDArray[Any], ...])

-assert_type(np.vstack([A, A]), np.ndarray[Any, Any])
+assert_type(np.vstack([A, A]), npt.NDArray[np.float64])
 assert_type(np.vstack([A, A], dtype=np.float64), npt.NDArray[np.float64])
 assert_type(np.vstack([A, C]), npt.NDArray[Any])
 assert_type(np.vstack([C, C]), npt.NDArray[Any])

-assert_type(np.hstack([A, A]), np.ndarray[Any, Any])
+assert_type(np.hstack([A, A]), npt.NDArray[np.float64])
 assert_type(np.hstack([A, A], dtype=np.float64), npt.NDArray[np.float64])

-assert_type(np.stack([A, A]), Any)
+assert_type(np.stack([A, A]), npt.NDArray[np.float64])
 assert_type(np.stack([A, A], dtype=np.float64), npt.NDArray[np.float64])
 assert_type(np.stack([A, C]), npt.NDArray[Any])
 assert_type(np.stack([C, C]), npt.NDArray[Any])

-assert_type(np.stack([A, A], axis=0), Any)
+assert_type(np.stack([A, A], axis=0), npt.NDArray[np.float64])
 assert_type(np.stack([A, A], out=B), SubClass[np.float64])

 assert_type(np.block([[A, A], [A, A]]), npt.NDArray[Any])
diff --git a/numpy/typing/tests/data/reveal/arraypad.pyi b/numpy/typing/tests/data/reveal/arraypad.pyi
index f53613ba2fd4..d053dab1c76f 100644
--- a/numpy/typing/tests/data/reveal/arraypad.pyi
+++ b/numpy/typing/tests/data/reveal/arraypad.pyi
@@ -1,14 +1,10 @@
-import sys
 from collections.abc import Mapping
 from typing import Any, SupportsIndex

 import numpy as np
 import numpy.typing as npt

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 def mode_func(
     ar: npt.NDArray[np.number[Any]],
diff --git a/numpy/typing/tests/data/reveal/arrayprint.pyi b/numpy/typing/tests/data/reveal/arrayprint.pyi
index c4a161959547..f19f1536d416 100644
--- a/numpy/typing/tests/data/reveal/arrayprint.pyi
+++ b/numpy/typing/tests/data/reveal/arrayprint.pyi
@@ -1,4 +1,3 @@
-import sys
 import contextlib
 from collections.abc import Callable
 from typing import Any
@@ -7,10 +6,7 @@ import numpy as np
 import numpy.typing as npt
 from numpy._core.arrayprint import _FormatOptions

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 AR: npt.NDArray[np.int64]
 func_float: Callable[[np.floating[Any]], str]
diff --git a/numpy/typing/tests/data/reveal/arraysetops.pyi b/numpy/typing/tests/data/reveal/arraysetops.pyi
index 3b0a2448fdbc..eabc7677cde9 100644
--- a/numpy/typing/tests/data/reveal/arraysetops.pyi
+++ b/numpy/typing/tests/data/reveal/arraysetops.pyi
@@ -1,16 +1,10 @@
-import sys
 from typing import Any

 import numpy as np
 import numpy.typing as npt
-from numpy.lib._arraysetops_impl import (
-    UniqueAllResult, UniqueCountsResult, UniqueInverseResult
-)
+from numpy.lib._arraysetops_impl import UniqueAllResult, UniqueCountsResult, UniqueInverseResult

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 AR_b: npt.NDArray[np.bool]
 AR_i8: npt.NDArray[np.int64]
@@ -29,7 +23,10 @@ assert_type(np.ediff1d(AR_LIKE_f8, to_begin=[1, 1.5]), npt.NDArray[Any])
 assert_type(np.intersect1d(AR_i8, AR_i8), npt.NDArray[np.int64])
 assert_type(np.intersect1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64])
 assert_type(np.intersect1d(AR_f8, AR_i8), npt.NDArray[Any])
-assert_type(np.intersect1d(AR_f8, AR_f8, return_indices=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]])
+assert_type(
+    np.intersect1d(AR_f8, AR_f8, return_indices=True),
+    tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]],
+)

 assert_type(np.setxor1d(AR_i8, AR_i8), npt.NDArray[np.int64])
 assert_type(np.setxor1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64])
diff --git a/numpy/typing/tests/data/reveal/arrayterator.pyi b/numpy/typing/tests/data/reveal/arrayterator.pyi
index 5514bf6d773f..332e5da9bc96 100644
--- a/numpy/typing/tests/data/reveal/arrayterator.pyi
+++ b/numpy/typing/tests/data/reveal/arrayterator.pyi
@@ -1,14 +1,10 @@
-import sys
 from typing import Any
 from collections.abc import Generator

 import numpy as np
 import numpy.typing as npt

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 AR_i8: npt.NDArray[np.int64]
 ar_iter = np.lib.Arrayterator(AR_i8)
@@ -26,8 +22,8 @@ assert_type(ar_iter.__array__(), npt.NDArray[np.int64])

 for i in ar_iter:
     assert_type(i, npt.NDArray[np.int64])

-assert_type(ar_iter[0], np.lib.Arrayterator[Any, np.dtype[np.int64]])
-assert_type(ar_iter[...], np.lib.Arrayterator[Any, np.dtype[np.int64]])
-assert_type(ar_iter[:], np.lib.Arrayterator[Any, np.dtype[np.int64]])
-assert_type(ar_iter[0, 0, 0], np.lib.Arrayterator[Any, np.dtype[np.int64]])
-assert_type(ar_iter[..., 0, :], np.lib.Arrayterator[Any, np.dtype[np.int64]])
+assert_type(ar_iter[0], np.lib.Arrayterator[tuple[int, ...], np.dtype[np.int64]])
+assert_type(ar_iter[...], np.lib.Arrayterator[tuple[int, ...], np.dtype[np.int64]])
+assert_type(ar_iter[:], np.lib.Arrayterator[tuple[int, ...], np.dtype[np.int64]])
+assert_type(ar_iter[0, 0, 0], np.lib.Arrayterator[tuple[int, ...], np.dtype[np.int64]])
+assert_type(ar_iter[..., 0, :], np.lib.Arrayterator[tuple[int, ...], np.dtype[np.int64]])
diff --git a/numpy/typing/tests/data/reveal/bitwise_ops.pyi b/numpy/typing/tests/data/reveal/bitwise_ops.pyi
index 1f04f4b045fe..384932a2c823 100644
--- a/numpy/typing/tests/data/reveal/bitwise_ops.pyi
+++ b/numpy/typing/tests/data/reveal/bitwise_ops.pyi
@@ -1,28 +1,31 @@
-import sys
-from typing import Any
+from typing import Any, Literal as L, TypeAlias

 import numpy as np
 import numpy.typing as npt
 from numpy._typing import _64Bit, _32Bit

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

-i8 = np.int64(1)
-u8 = np.uint64(1)
+FalseType: TypeAlias = L[False]
+TrueType: TypeAlias = L[True]

-i4 = np.int32(1)
-u4 = np.uint32(1)
+i4: np.int32
+i8: np.int64

-b_ = np.bool(1)
+u4: np.uint32
+u8: np.uint64

-b = bool(1)
-i = int(1)
+b_: np.bool[bool]
+b0_: np.bool[FalseType]
+b1_: np.bool[TrueType]

-AR = np.array([0, 1, 2], dtype=np.int32)
-AR.setflags(write=False)
+b: bool
+b0: FalseType
+b1: TrueType
+
+i: int
+
+AR: npt.NDArray[np.int32]
@@ -43,11 +46,11 @@ assert_type(i4 | i4, np.int32)
 assert_type(i4 ^ i4, np.int32)
 assert_type(i4 & i4, np.int32)

-assert_type(i8 << i4, np.signedinteger[_32Bit | _64Bit])
-assert_type(i8 >> i4, np.signedinteger[_32Bit | _64Bit])
-assert_type(i8 | i4, np.signedinteger[_32Bit | _64Bit])
-assert_type(i8 ^ i4, np.signedinteger[_32Bit | _64Bit])
-assert_type(i8 & i4, np.signedinteger[_32Bit | _64Bit])
+assert_type(i8 << i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit])
+assert_type(i8 >> i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit])
+assert_type(i8 | i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit])
+assert_type(i8 ^ i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit])
+assert_type(i8 & i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit])

 assert_type(i8 << b_, np.int64)
 assert_type(i8 >> b_, np.int64)
@@ -123,13 +126,45 @@ assert_type(b_ & b, np.bool)

 assert_type(b_ << i, np.int_)
 assert_type(b_ >> i, np.int_)
-assert_type(b_ | i, np.int_)
-assert_type(b_ ^ i, np.int_)
-assert_type(b_ & i, np.int_)
+assert_type(b_ | i, np.bool | np.int_)
+assert_type(b_ ^ i, np.bool | np.int_)
+assert_type(b_ & i, np.bool | np.int_)

 assert_type(~i8, np.int64)
 assert_type(~i4, np.int32)
 assert_type(~u8, np.uint64)
 assert_type(~u4, np.uint32)
 assert_type(~b_, np.bool)
+assert_type(~b0_, np.bool[TrueType])
+assert_type(~b1_, np.bool[FalseType])
 assert_type(~AR, npt.NDArray[np.int32])
+
+assert_type(b_ | b0_, np.bool)
+assert_type(b0_ | b_, np.bool)
+assert_type(b_ | b1_, np.bool[TrueType])
+assert_type(b1_ | b_, np.bool[TrueType])
+
+assert_type(b_ ^ b0_, np.bool)
+assert_type(b0_ ^ b_, np.bool)
+assert_type(b_ ^ b1_, np.bool)
+assert_type(b1_ ^ b_, np.bool)
+
+assert_type(b_ & b0_, np.bool[FalseType])
+assert_type(b0_ & b_, np.bool[FalseType])
+assert_type(b_ & b1_, np.bool)
+assert_type(b1_ & b_, np.bool)
+
+assert_type(b0_ | b0_, np.bool[FalseType])
+assert_type(b0_ | b1_, np.bool[TrueType])
+assert_type(b1_ | b0_, np.bool[TrueType])
+assert_type(b1_ | b1_, np.bool[TrueType])
+
+assert_type(b0_ ^ b0_, np.bool[FalseType])
+assert_type(b0_ ^ b1_, np.bool[TrueType])
+assert_type(b1_ ^ b0_, np.bool[TrueType])
+assert_type(b1_ ^ b1_, np.bool[FalseType])
+
+assert_type(b0_ & b0_, np.bool[FalseType])
+assert_type(b0_ & b1_, np.bool[FalseType])
+assert_type(b1_ & b0_, np.bool[FalseType])
+assert_type(b1_ & b1_, np.bool[TrueType])
diff --git a/numpy/typing/tests/data/reveal/char.pyi b/numpy/typing/tests/data/reveal/char.pyi
index ab7186fadce4..19ca211bec1a 100644
--- a/numpy/typing/tests/data/reveal/char.pyi
+++ b/numpy/typing/tests/data/reveal/char.pyi
@@ -1,152 +1,219 @@
-import sys
-from typing import Any
-
 import numpy as np
 import numpy.typing as npt
+import numpy._typing as np_t

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type
+from typing import TypeAlias

 AR_U: npt.NDArray[np.str_]
 AR_S: npt.NDArray[np.bytes_]
+AR_T: np.ndarray[np_t._Shape, np.dtypes.StringDType]
+
+AR_T_alias: TypeAlias = np.ndarray[np_t._Shape, np.dtypes.StringDType]
+AR_TU_alias: TypeAlias = AR_T_alias | npt.NDArray[np.str_]

 assert_type(np.char.equal(AR_U, AR_U), npt.NDArray[np.bool])
 assert_type(np.char.equal(AR_S, AR_S), npt.NDArray[np.bool])
+assert_type(np.char.equal(AR_T, AR_T), npt.NDArray[np.bool])

 assert_type(np.char.not_equal(AR_U, AR_U), npt.NDArray[np.bool])
 assert_type(np.char.not_equal(AR_S, AR_S), npt.NDArray[np.bool])
+assert_type(np.char.not_equal(AR_T, AR_T), npt.NDArray[np.bool])

 assert_type(np.char.greater_equal(AR_U, AR_U), npt.NDArray[np.bool])
 assert_type(np.char.greater_equal(AR_S, AR_S), npt.NDArray[np.bool])
+assert_type(np.char.greater_equal(AR_T, AR_T), npt.NDArray[np.bool])

 assert_type(np.char.less_equal(AR_U, AR_U), npt.NDArray[np.bool])
 assert_type(np.char.less_equal(AR_S, AR_S), npt.NDArray[np.bool])
+assert_type(np.char.less_equal(AR_T, AR_T), npt.NDArray[np.bool])

 assert_type(np.char.greater(AR_U, AR_U), npt.NDArray[np.bool])
 assert_type(np.char.greater(AR_S, AR_S), npt.NDArray[np.bool])
+assert_type(np.char.greater(AR_T, AR_T), npt.NDArray[np.bool])

 assert_type(np.char.less(AR_U, AR_U), npt.NDArray[np.bool])
 assert_type(np.char.less(AR_S, AR_S), npt.NDArray[np.bool])
+assert_type(np.char.less(AR_T, AR_T), npt.NDArray[np.bool])

 assert_type(np.char.multiply(AR_U, 5), npt.NDArray[np.str_])
 assert_type(np.char.multiply(AR_S, [5, 4, 3]), npt.NDArray[np.bytes_])
+assert_type(np.char.multiply(AR_T, 5), AR_T_alias)

 assert_type(np.char.mod(AR_U, "test"), npt.NDArray[np.str_])
 assert_type(np.char.mod(AR_S, "test"), npt.NDArray[np.bytes_])
+assert_type(np.char.mod(AR_T, "test"), AR_T_alias)

 assert_type(np.char.capitalize(AR_U), npt.NDArray[np.str_])
 assert_type(np.char.capitalize(AR_S), npt.NDArray[np.bytes_])
+assert_type(np.char.capitalize(AR_T), AR_T_alias)

 assert_type(np.char.center(AR_U, 5), npt.NDArray[np.str_])
 assert_type(np.char.center(AR_S, [2, 3, 4], b"a"), npt.NDArray[np.bytes_])
+assert_type(np.char.center(AR_T, 5), AR_T_alias)

 assert_type(np.char.encode(AR_U), npt.NDArray[np.bytes_])
+assert_type(np.char.encode(AR_T), npt.NDArray[np.bytes_])
 assert_type(np.char.decode(AR_S), npt.NDArray[np.str_])

 assert_type(np.char.expandtabs(AR_U), npt.NDArray[np.str_])
 assert_type(np.char.expandtabs(AR_S, tabsize=4), npt.NDArray[np.bytes_])
+assert_type(np.char.expandtabs(AR_T), AR_T_alias)

 assert_type(np.char.join(AR_U, "_"), npt.NDArray[np.str_])
 assert_type(np.char.join(AR_S, [b"_", b""]), npt.NDArray[np.bytes_])
+assert_type(np.char.join(AR_T, "_"), AR_TU_alias)

 assert_type(np.char.ljust(AR_U, 5), npt.NDArray[np.str_])
 assert_type(np.char.ljust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_])
+assert_type(np.char.ljust(AR_T, 5), AR_T_alias)
+assert_type(np.char.ljust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_TU_alias)
+
 assert_type(np.char.rjust(AR_U, 5), npt.NDArray[np.str_])
 assert_type(np.char.rjust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_])
+assert_type(np.char.rjust(AR_T, 5), AR_T_alias)
+assert_type(np.char.rjust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_TU_alias)

 assert_type(np.char.lstrip(AR_U), npt.NDArray[np.str_])
-assert_type(np.char.lstrip(AR_S, chars=b"_"), npt.NDArray[np.bytes_])
+assert_type(np.char.lstrip(AR_S, b"_"), npt.NDArray[np.bytes_])
+assert_type(np.char.lstrip(AR_T), AR_T_alias)
+assert_type(np.char.lstrip(AR_T, "_"), AR_TU_alias)
+
 assert_type(np.char.rstrip(AR_U), npt.NDArray[np.str_])
-assert_type(np.char.rstrip(AR_S, chars=b"_"), npt.NDArray[np.bytes_])
+assert_type(np.char.rstrip(AR_S, b"_"), npt.NDArray[np.bytes_])
+assert_type(np.char.rstrip(AR_T), AR_T_alias)
+assert_type(np.char.rstrip(AR_T, "_"), AR_TU_alias)
+
 assert_type(np.char.strip(AR_U), npt.NDArray[np.str_])
-assert_type(np.char.strip(AR_S, chars=b"_"), npt.NDArray[np.bytes_])
+assert_type(np.char.strip(AR_S, b"_"), npt.NDArray[np.bytes_])
+assert_type(np.char.strip(AR_T), AR_T_alias)
+assert_type(np.char.strip(AR_T, "_"), AR_TU_alias)
+
+assert_type(np.char.count(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_])
+assert_type(np.char.count(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_])
+assert_type(np.char.count(AR_T, AR_T, start=[1, 2, 3]), npt.NDArray[np.int_])
+assert_type(np.char.count(AR_T, ["a", "b", "c"], end=9), npt.NDArray[np.int_])

 assert_type(np.char.partition(AR_U, "\n"), npt.NDArray[np.str_])
 assert_type(np.char.partition(AR_S, [b"a", b"b", b"c"]), npt.NDArray[np.bytes_])
+assert_type(np.char.partition(AR_T, "\n"), AR_TU_alias)
+
 assert_type(np.char.rpartition(AR_U, "\n"), npt.NDArray[np.str_])
 assert_type(np.char.rpartition(AR_S, [b"a", b"b", b"c"]), npt.NDArray[np.bytes_])
+assert_type(np.char.rpartition(AR_T, "\n"), AR_TU_alias)

 assert_type(np.char.replace(AR_U, "_", "-"), npt.NDArray[np.str_])
 assert_type(np.char.replace(AR_S, [b"_", b""], [b"a", b"b"]), npt.NDArray[np.bytes_])
+assert_type(np.char.replace(AR_T, "_", "_"), AR_TU_alias)

 assert_type(np.char.split(AR_U, "_"), npt.NDArray[np.object_])
 assert_type(np.char.split(AR_S, maxsplit=[1, 2, 3]), npt.NDArray[np.object_])
+assert_type(np.char.split(AR_T, "_"), npt.NDArray[np.object_])
+
 assert_type(np.char.rsplit(AR_U, "_"), npt.NDArray[np.object_])
 assert_type(np.char.rsplit(AR_S, maxsplit=[1, 2, 3]), npt.NDArray[np.object_])
+assert_type(np.char.rsplit(AR_T, "_"), npt.NDArray[np.object_])

 assert_type(np.char.splitlines(AR_U), npt.NDArray[np.object_])
 assert_type(np.char.splitlines(AR_S, keepends=[True, True, False]), npt.NDArray[np.object_])
+assert_type(np.char.splitlines(AR_T), npt.NDArray[np.object_])
+
+assert_type(np.char.lower(AR_U), npt.NDArray[np.str_])
+assert_type(np.char.lower(AR_S), npt.NDArray[np.bytes_])
+assert_type(np.char.lower(AR_T), AR_T_alias)
+
+assert_type(np.char.upper(AR_U), npt.NDArray[np.str_])
+assert_type(np.char.upper(AR_S), npt.NDArray[np.bytes_])
+assert_type(np.char.upper(AR_T), AR_T_alias)

 assert_type(np.char.swapcase(AR_U), npt.NDArray[np.str_])
 assert_type(np.char.swapcase(AR_S), npt.NDArray[np.bytes_])
+assert_type(np.char.swapcase(AR_T), AR_T_alias)

 assert_type(np.char.title(AR_U), npt.NDArray[np.str_])
 assert_type(np.char.title(AR_S), npt.NDArray[np.bytes_])
-
-assert_type(np.char.upper(AR_U), npt.NDArray[np.str_])
-assert_type(np.char.upper(AR_S), npt.NDArray[np.bytes_])
+assert_type(np.char.title(AR_T), AR_T_alias)

 assert_type(np.char.zfill(AR_U, 5), npt.NDArray[np.str_])
 assert_type(np.char.zfill(AR_S, [2, 3, 4]), npt.NDArray[np.bytes_])
-
-assert_type(np.char.count(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_])
-assert_type(np.char.count(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_])
+assert_type(np.char.zfill(AR_T, 5), AR_T_alias)

 assert_type(np.char.endswith(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.bool])
 assert_type(np.char.endswith(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.bool])
+assert_type(np.char.endswith(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.bool])
+
 assert_type(np.char.startswith(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.bool])
 assert_type(np.char.startswith(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.bool])
+assert_type(np.char.startswith(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.bool])

 assert_type(np.char.find(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_])
 assert_type(np.char.find(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_])
+assert_type(np.char.find(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_])
+
 assert_type(np.char.rfind(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_])
 assert_type(np.char.rfind(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_])
+assert_type(np.char.rfind(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_])

 assert_type(np.char.index(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_])
 assert_type(np.char.index(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_])
+assert_type(np.char.index(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_])
+
 assert_type(np.char.rindex(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_])
 assert_type(np.char.rindex(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_])
+assert_type(np.char.rindex(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_])

 assert_type(np.char.isalpha(AR_U), npt.NDArray[np.bool])
 assert_type(np.char.isalpha(AR_S), npt.NDArray[np.bool])
+assert_type(np.char.isalpha(AR_T), npt.NDArray[np.bool])

 assert_type(np.char.isalnum(AR_U), npt.NDArray[np.bool])
 assert_type(np.char.isalnum(AR_S), npt.NDArray[np.bool])
+assert_type(np.char.isalnum(AR_T), npt.NDArray[np.bool])

 assert_type(np.char.isdecimal(AR_U), npt.NDArray[np.bool])
+assert_type(np.char.isdecimal(AR_T), npt.NDArray[np.bool])

 assert_type(np.char.isdigit(AR_U), npt.NDArray[np.bool])
 assert_type(np.char.isdigit(AR_S), npt.NDArray[np.bool])
+assert_type(np.char.isdigit(AR_T), npt.NDArray[np.bool])

 assert_type(np.char.islower(AR_U), npt.NDArray[np.bool])
 assert_type(np.char.islower(AR_S), npt.NDArray[np.bool])
+assert_type(np.char.islower(AR_T), npt.NDArray[np.bool])

 assert_type(np.char.isnumeric(AR_U), npt.NDArray[np.bool])
+assert_type(np.char.isnumeric(AR_T), npt.NDArray[np.bool])

 assert_type(np.char.isspace(AR_U), npt.NDArray[np.bool])
 assert_type(np.char.isspace(AR_S), npt.NDArray[np.bool])
+assert_type(np.char.isspace(AR_T), npt.NDArray[np.bool])

 assert_type(np.char.istitle(AR_U), npt.NDArray[np.bool])
 assert_type(np.char.istitle(AR_S), npt.NDArray[np.bool])
+assert_type(np.char.istitle(AR_T), npt.NDArray[np.bool])

 assert_type(np.char.isupper(AR_U), npt.NDArray[np.bool])
 assert_type(np.char.isupper(AR_S), npt.NDArray[np.bool])
+assert_type(np.char.isupper(AR_T), npt.NDArray[np.bool])

 assert_type(np.char.str_len(AR_U), npt.NDArray[np.int_])
 assert_type(np.char.str_len(AR_S), npt.NDArray[np.int_])
-
-assert_type(np.char.array(AR_U), np.char.chararray[Any, np.dtype[np.str_]])
-assert_type(np.char.array(AR_S, order="K"), np.char.chararray[Any, np.dtype[np.bytes_]])
-assert_type(np.char.array("bob", copy=True), np.char.chararray[Any, np.dtype[np.str_]])
-assert_type(np.char.array(b"bob", itemsize=5), np.char.chararray[Any, np.dtype[np.bytes_]])
-assert_type(np.char.array(1, unicode=False), np.char.chararray[Any, np.dtype[np.bytes_]])
-assert_type(np.char.array(1, unicode=True), np.char.chararray[Any, np.dtype[np.str_]])
-
-assert_type(np.char.asarray(AR_U), np.char.chararray[Any, np.dtype[np.str_]])
-assert_type(np.char.asarray(AR_S, order="K"), np.char.chararray[Any, np.dtype[np.bytes_]])
-assert_type(np.char.asarray("bob"), np.char.chararray[Any, np.dtype[np.str_]])
-assert_type(np.char.asarray(b"bob", itemsize=5), np.char.chararray[Any, np.dtype[np.bytes_]])
-assert_type(np.char.asarray(1, unicode=False), np.char.chararray[Any, np.dtype[np.bytes_]])
-assert_type(np.char.asarray(1, unicode=True), np.char.chararray[Any, np.dtype[np.str_]])
+assert_type(np.char.str_len(AR_T), npt.NDArray[np.int_])
+
+assert_type(np.char.translate(AR_U, ""), npt.NDArray[np.str_])
+assert_type(np.char.translate(AR_S, ""), npt.NDArray[np.bytes_])
+assert_type(np.char.translate(AR_T, ""), AR_T_alias)
+
+assert_type(np.char.array(AR_U), np.char.chararray[tuple[int, ...], np.dtype[np.str_]])
+assert_type(np.char.array(AR_S, order="K"), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]])
+assert_type(np.char.array("bob", copy=True), np.char.chararray[tuple[int, ...], np.dtype[np.str_]])
+assert_type(np.char.array(b"bob", itemsize=5), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]])
+assert_type(np.char.array(1, unicode=False), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]])
+assert_type(np.char.array(1, unicode=True), np.char.chararray[tuple[int, ...], np.dtype[np.str_]])
+
+assert_type(np.char.asarray(AR_U), np.char.chararray[tuple[int, ...], np.dtype[np.str_]])
+assert_type(np.char.asarray(AR_S, order="K"), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]])
+assert_type(np.char.asarray("bob"), np.char.chararray[tuple[int, ...], np.dtype[np.str_]])
+assert_type(np.char.asarray(b"bob", itemsize=5), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]])
+assert_type(np.char.asarray(1, unicode=False), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]])
+assert_type(np.char.asarray(1, unicode=True), np.char.chararray[tuple[int, ...], np.dtype[np.str_]])
diff --git a/numpy/typing/tests/data/reveal/chararray.pyi b/numpy/typing/tests/data/reveal/chararray.pyi
index 0fb621526288..116880f44356 100644
--- a/numpy/typing/tests/data/reveal/chararray.pyi
+++ b/numpy/typing/tests/data/reveal/chararray.pyi
@@ -1,16 +1,12 @@
-import sys
 from typing import Any

 import numpy as np
 import numpy.typing as npt

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

-AR_U: np.char.chararray[Any, np.dtype[np.str_]]
-AR_S: np.char.chararray[Any, np.dtype[np.bytes_]]
+AR_U: np.char.chararray[tuple[int, ...], np.dtype[np.str_]]
+AR_S: np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]]

 assert_type(AR_U == AR_U, npt.NDArray[np.bool])
 assert_type(AR_S == AR_S, npt.NDArray[np.bool])
@@ -30,46 +26,46 @@ assert_type(AR_S > AR_S, npt.NDArray[np.bool])
 assert_type(AR_U < AR_U, npt.NDArray[np.bool])
 assert_type(AR_S < AR_S, npt.NDArray[np.bool])

-assert_type(AR_U * 5, np.char.chararray[Any, np.dtype[np.str_]])
-assert_type(AR_S * [5], np.char.chararray[Any, np.dtype[np.bytes_]])
+assert_type(AR_U * 5, np.char.chararray[tuple[int, ...], np.dtype[np.str_]])
+assert_type(AR_S * [5], np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]])

-assert_type(AR_U % "test", np.char.chararray[Any, np.dtype[np.str_]])
-assert_type(AR_S % b"test", np.char.chararray[Any, np.dtype[np.bytes_]])
+assert_type(AR_U % "test", np.char.chararray[tuple[int, ...], np.dtype[np.str_]])
+assert_type(AR_S % b"test", np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]])

-assert_type(AR_U.capitalize(), np.char.chararray[Any, np.dtype[np.str_]])
-assert_type(AR_S.capitalize(), np.char.chararray[Any, np.dtype[np.bytes_]])
+assert_type(AR_U.capitalize(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]])
+assert_type(AR_S.capitalize(), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]])

-assert_type(AR_U.center(5), np.char.chararray[Any, np.dtype[np.str_]])
-assert_type(AR_S.center([2, 3, 4], b"a"), np.char.chararray[Any, np.dtype[np.bytes_]])
+assert_type(AR_U.center(5), np.char.chararray[tuple[int, ...], np.dtype[np.str_]])
+assert_type(AR_S.center([2, 3, 4], b"a"), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]])

-assert_type(AR_U.encode(), np.char.chararray[Any, np.dtype[np.bytes_]])
-assert_type(AR_S.decode(), np.char.chararray[Any, np.dtype[np.str_]])
+assert_type(AR_U.encode(), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]])
+assert_type(AR_S.decode(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]])

-assert_type(AR_U.expandtabs(), np.char.chararray[Any, np.dtype[np.str_]])
-assert_type(AR_S.expandtabs(tabsize=4), np.char.chararray[Any, np.dtype[np.bytes_]])
+assert_type(AR_U.expandtabs(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]])
+assert_type(AR_S.expandtabs(tabsize=4), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]])

-assert_type(AR_U.join("_"), np.char.chararray[Any, np.dtype[np.str_]])
-assert_type(AR_S.join([b"_", b""]), np.char.chararray[Any, np.dtype[np.bytes_]])
+assert_type(AR_U.join("_"), np.char.chararray[tuple[int, ...], np.dtype[np.str_]])
+assert_type(AR_S.join([b"_", b""]), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]])

-assert_type(AR_U.ljust(5), np.char.chararray[Any, np.dtype[np.str_]])
-assert_type(AR_S.ljust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), np.char.chararray[Any, np.dtype[np.bytes_]])
-assert_type(AR_U.rjust(5), np.char.chararray[Any, np.dtype[np.str_]])
-assert_type(AR_S.rjust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), np.char.chararray[Any, np.dtype[np.bytes_]])
+assert_type(AR_U.ljust(5), np.char.chararray[tuple[int, ...], np.dtype[np.str_]])
+assert_type(AR_S.ljust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]])
+assert_type(AR_U.rjust(5), np.char.chararray[tuple[int, ...], np.dtype[np.str_]])
+assert_type(AR_S.rjust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]])

-assert_type(AR_U.lstrip(), np.char.chararray[Any, np.dtype[np.str_]])
-assert_type(AR_S.lstrip(chars=b"_"), np.char.chararray[Any, np.dtype[np.bytes_]])
-assert_type(AR_U.rstrip(), np.char.chararray[Any, np.dtype[np.str_]])
-assert_type(AR_S.rstrip(chars=b"_"), np.char.chararray[Any, np.dtype[np.bytes_]])
-assert_type(AR_U.strip(), np.char.chararray[Any, np.dtype[np.str_]])
-assert_type(AR_S.strip(chars=b"_"), np.char.chararray[Any, np.dtype[np.bytes_]])
+assert_type(AR_U.lstrip(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]])
+assert_type(AR_S.lstrip(chars=b"_"), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]])
+assert_type(AR_U.rstrip(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]])
+assert_type(AR_S.rstrip(chars=b"_"), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]])
+assert_type(AR_U.strip(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]])
+assert_type(AR_S.strip(chars=b"_"), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]])

-assert_type(AR_U.partition("\n"), np.char.chararray[Any, np.dtype[np.str_]])
-assert_type(AR_S.partition([b"a", b"b", b"c"]), np.char.chararray[Any, np.dtype[np.bytes_]])
-assert_type(AR_U.rpartition("\n"), np.char.chararray[Any, np.dtype[np.str_]])
-assert_type(AR_S.rpartition([b"a", b"b", b"c"]), np.char.chararray[Any, np.dtype[np.bytes_]])
+assert_type(AR_U.partition("\n"), np.char.chararray[tuple[int, ...], np.dtype[np.str_]])
+assert_type(AR_S.partition([b"a", b"b", b"c"]), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]])
+assert_type(AR_U.rpartition("\n"), np.char.chararray[tuple[int, ...], np.dtype[np.str_]])
+assert_type(AR_S.rpartition([b"a", b"b", b"c"]), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]])

-assert_type(AR_U.replace("_", "-"), np.char.chararray[Any, np.dtype[np.str_]])
-assert_type(AR_S.replace([b"_", b""], [b"a", b"b"]), np.char.chararray[Any, np.dtype[np.bytes_]])
+assert_type(AR_U.replace("_", "-"), np.char.chararray[tuple[int, ...], np.dtype[np.str_]])
+assert_type(AR_S.replace([b"_", b""], [b"a", b"b"]), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]])

 assert_type(AR_U.split("_"), npt.NDArray[np.object_])
 assert_type(AR_S.split(maxsplit=[1, 2, 3]), npt.NDArray[np.object_])
@@ -79,17 +75,17 @@ assert_type(AR_S.rsplit(maxsplit=[1, 2, 3]), npt.NDArray[np.object_])
 assert_type(AR_U.splitlines(), npt.NDArray[np.object_])
 assert_type(AR_S.splitlines(keepends=[True, True, False]), npt.NDArray[np.object_])

-assert_type(AR_U.swapcase(), np.char.chararray[Any, np.dtype[np.str_]])
-assert_type(AR_S.swapcase(), np.char.chararray[Any, np.dtype[np.bytes_]])
+assert_type(AR_U.swapcase(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]])
+assert_type(AR_S.swapcase(), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]])

-assert_type(AR_U.title(), np.char.chararray[Any, np.dtype[np.str_]])
-assert_type(AR_S.title(), np.char.chararray[Any, np.dtype[np.bytes_]])
+assert_type(AR_U.title(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]])
+assert_type(AR_S.title(), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]])

-assert_type(AR_U.upper(), np.char.chararray[Any, np.dtype[np.str_]])
-assert_type(AR_S.upper(), np.char.chararray[Any, np.dtype[np.bytes_]])
+assert_type(AR_U.upper(), np.char.chararray[tuple[int, ...], np.dtype[np.str_]])
+assert_type(AR_S.upper(), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]])

-assert_type(AR_U.zfill(5), np.char.chararray[Any, np.dtype[np.str_]])
-assert_type(AR_S.zfill([2, 3, 4]), np.char.chararray[Any, np.dtype[np.bytes_]])
+assert_type(AR_U.zfill(5), np.char.chararray[tuple[int, ...], np.dtype[np.str_]])
+assert_type(AR_S.zfill([2, 3, 4]), np.char.chararray[tuple[int, ...], np.dtype[np.bytes_]])

 assert_type(AR_U.count("a", start=[1, 2, 3]), npt.NDArray[np.int_])
 assert_type(AR_S.count([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_])
diff --git a/numpy/typing/tests/data/reveal/comparisons.pyi b/numpy/typing/tests/data/reveal/comparisons.pyi
index 034efbef377e..b71ef1d1b79f 100644
--- a/numpy/typing/tests/data/reveal/comparisons.pyi
+++ b/numpy/typing/tests/data/reveal/comparisons.pyi
@@ -1,4 +1,3 @@
-import sys
 import fractions
 import decimal
 from typing import Any
@@ -6,10 +5,7 @@
 import numpy as np
 import numpy.typing as npt

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 c16 = np.complex128()
 f8 = np.float64()
diff --git a/numpy/typing/tests/data/reveal/constants.pyi b/numpy/typing/tests/data/reveal/constants.pyi
index 5166d4f26d76..146a40cf467f 100644
--- a/numpy/typing/tests/data/reveal/constants.pyi
+++ b/numpy/typing/tests/data/reveal/constants.pyi
@@ -1,12 +1,8 @@
-import sys
+from typing import Literal
+from typing_extensions import assert_type

 import numpy as np

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
-
 assert_type(np.e, float)
 assert_type(np.euler_gamma, float)
 assert_type(np.inf, float)
@@ -14,6 +10,6 @@ assert_type(np.nan, float)
 assert_type(np.pi, float)

 assert_type(np.little_endian, bool)
-assert_type(np.True_, np.bool)
-assert_type(np.False_, np.bool)
+assert_type(np.True_, np.bool[Literal[True]])
+assert_type(np.False_, np.bool[Literal[False]])
diff --git a/numpy/typing/tests/data/reveal/ctypeslib.pyi b/numpy/typing/tests/data/reveal/ctypeslib.pyi
index 992eb4bb43b9..80928a93444c 100644
--- a/numpy/typing/tests/data/reveal/ctypeslib.pyi
+++ b/numpy/typing/tests/data/reveal/ctypeslib.pyi
@@ -6,10 +6,7 @@
 import numpy as np
 import numpy.typing as npt
 from numpy import ctypeslib

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 AR_bool: npt.NDArray[np.bool]
 AR_ubyte: npt.NDArray[np.ubyte]
diff --git a/numpy/typing/tests/data/reveal/datasource.pyi b/numpy/typing/tests/data/reveal/datasource.pyi
index cc5a84852a0f..88f2b076be84 100644
--- a/numpy/typing/tests/data/reveal/datasource.pyi
+++ b/numpy/typing/tests/data/reveal/datasource.pyi
@@ -1,13 +1,9 @@
-import sys
 from pathlib import Path
 from typing import IO, Any

 import numpy as np

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 path1: Path
 path2: str
diff --git a/numpy/typing/tests/data/reveal/dtype.pyi b/numpy/typing/tests/data/reveal/dtype.pyi
index 10f6ccd05a41..4cd6d4a11aff 100644
--- a/numpy/typing/tests/data/reveal/dtype.pyi
+++ b/numpy/typing/tests/data/reveal/dtype.pyi
@@ -1,18 +1,41 @@
-import sys
 import ctypes as ct
-from typing import Any
+import datetime as dt
+from decimal import Decimal
+from fractions import Fraction
+from typing import Any, Literal, TypeAlias

 import numpy as np
+from numpy.dtypes import StringDType

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type
+
+# a combination of likely `object` dtype-like candidates (no `_co`)
+_PyObjectLike: TypeAlias = Decimal | Fraction | dt.datetime | dt.timedelta

 dtype_U: np.dtype[np.str_]
 dtype_V: np.dtype[np.void]
 dtype_i8: np.dtype[np.int64]

+py_int_co: type[int | bool]
+py_float_co: type[float | int | bool]
+py_complex_co: type[complex | float | int | bool]
+py_object: type[_PyObjectLike]
+py_character: type[str | bytes]
+py_flexible: type[str | bytes | memoryview]
+
+ct_floating: type[ct.c_float | ct.c_double | ct.c_longdouble]
+ct_number: type[ct.c_uint8 | ct.c_float]
+ct_generic: type[ct.c_bool | ct.c_char]
+
+cs_integer: Literal["u1", "h"]
+cs_number: Literal["u1", "h", ">g"]
+cs_flex: Literal["V", "S"]
+cs_generic: Literal["H", "U", "h", "|M8[Y]", "?"]
+
+dt_inexact: np.dtype[np.inexact[Any]]
+dt_string: StringDType
+
+
 assert_type(np.dtype(np.float64), np.dtype[np.float64])
 assert_type(np.dtype(np.float64, metadata={"test": "test"}), np.dtype[np.float64])
 assert_type(np.dtype(np.int64), np.dtype[np.int64])
@@ -27,13 +50,35 @@ assert_type(np.dtype("bytes"), np.dtype[np.bytes_])
 assert_type(np.dtype("str"), np.dtype[np.str_])

 # Python types
-assert_type(np.dtype(complex), np.dtype[np.cdouble])
-assert_type(np.dtype(float), np.dtype[np.double])
-assert_type(np.dtype(int), np.dtype[np.int_])
 assert_type(np.dtype(bool), np.dtype[np.bool])
+assert_type(np.dtype(py_int_co), np.dtype[np.int_ | np.bool])
+assert_type(np.dtype(int), np.dtype[np.int_ | np.bool])
+assert_type(np.dtype(py_float_co), np.dtype[np.float64 | np.int_ | np.bool])
+assert_type(np.dtype(float), np.dtype[np.float64 | np.int_ | np.bool])
+assert_type(np.dtype(py_complex_co), np.dtype[np.complex128 | np.float64 | np.int_ | np.bool])
+assert_type(np.dtype(complex), np.dtype[np.complex128 | np.float64 | np.int_ | np.bool])
+assert_type(np.dtype(py_object), np.dtype[np.object_])
 assert_type(np.dtype(str), np.dtype[np.str_])
 assert_type(np.dtype(bytes), np.dtype[np.bytes_])
-assert_type(np.dtype(object), np.dtype[np.object_])
+assert_type(np.dtype(py_character), np.dtype[np.character])
+assert_type(np.dtype(memoryview), np.dtype[np.void])
+assert_type(np.dtype(py_flexible), np.dtype[np.flexible])
+
+assert_type(np.dtype(list), np.dtype[np.object_])
+assert_type(np.dtype(dt.datetime), np.dtype[np.object_])
+assert_type(np.dtype(dt.timedelta), np.dtype[np.object_])
+assert_type(np.dtype(Decimal), np.dtype[np.object_])
+assert_type(np.dtype(Fraction), np.dtype[np.object_])
+
+# char-codes
+assert_type(np.dtype("u1"), np.dtype[np.uint8])
+assert_type(np.dtype("l"), np.dtype[np.long])
+assert_type(np.dtype("longlong"), np.dtype[np.longlong])
+assert_type(np.dtype(">g"), np.dtype[np.longdouble])
+assert_type(np.dtype(cs_integer), np.dtype[np.integer[Any]])
+assert_type(np.dtype(cs_number), np.dtype[np.number[Any]])
+assert_type(np.dtype(cs_flex), np.dtype[np.flexible])
+assert_type(np.dtype(cs_generic), np.dtype[np.generic])

 # ctypes
 assert_type(np.dtype(ct.c_double), np.dtype[np.double])
@@ -44,10 +89,11 @@ assert_type(np.dtype(ct.c_char), np.dtype[np.bytes_])
 assert_type(np.dtype(ct.py_object), np.dtype[np.object_])

 # Special case for None
-assert_type(np.dtype(None), np.dtype[np.double])
+assert_type(np.dtype(None), np.dtype[np.float64])

 # Dtypes of dtypes
 assert_type(np.dtype(np.dtype(np.float64)), np.dtype[np.float64])
+assert_type(np.dtype(dt_inexact), np.dtype[np.inexact[Any]])

 # Parameterized dtypes
 assert_type(np.dtype("S8"), np.dtype[Any])
@@ -55,6 +101,13 @@ assert_type(np.dtype("S8"), np.dtype[Any])

 # Void
 assert_type(np.dtype(("U", 10)), np.dtype[np.void])

+# StringDType
+assert_type(np.dtype(dt_string), StringDType)
+assert_type(np.dtype("T"), StringDType)
+assert_type(np.dtype("=T"), StringDType)
+assert_type(np.dtype("|T"), StringDType)
+
+
 # Methods and attributes
 assert_type(dtype_U.base, np.dtype[Any])
 assert_type(dtype_U.subdtype, None | tuple[np.dtype[Any], tuple[int, ...]])
diff --git a/numpy/typing/tests/data/reveal/einsumfunc.pyi b/numpy/typing/tests/data/reveal/einsumfunc.pyi
index 645aaad31cf1..6dc44e23bda0 100644
--- a/numpy/typing/tests/data/reveal/einsumfunc.pyi
+++ b/numpy/typing/tests/data/reveal/einsumfunc.pyi
@@ -1,13 +1,9 @@
-import sys
 from typing import Any

 import numpy as np
 import numpy.typing as npt

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 AR_LIKE_b: list[bool]
 AR_LIKE_u: list[np.uint32]
diff --git a/numpy/typing/tests/data/reveal/emath.pyi b/numpy/typing/tests/data/reveal/emath.pyi
index d1027bf48d50..cc6579cf3b33 100644
--- a/numpy/typing/tests/data/reveal/emath.pyi
+++ b/numpy/typing/tests/data/reveal/emath.pyi
@@ -1,13 +1,9 @@
-import sys
 from typing import Any

 import numpy as np
 import numpy.typing as npt

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 AR_f8: npt.NDArray[np.float64]
 AR_c16: npt.NDArray[np.complex128]
diff --git a/numpy/typing/tests/data/reveal/false_positives.pyi b/numpy/typing/tests/data/reveal/false_positives.pyi
deleted file mode 100644
index 7a2e016245a6..000000000000
--- a/numpy/typing/tests/data/reveal/false_positives.pyi
+++ /dev/null
@@ -1,18 +0,0 @@
-import sys
-from typing import Any
-
-import numpy as np
-import numpy.typing as npt
-
-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
-
-AR_Any: npt.NDArray[Any]
-
-# Mypy bug where overload ambiguity is ignored for `Any`-parametrized types;
-# xref numpy/numpy#20099 and python/mypy#11347
-#
-# The expected output would be something akin to `npt.NDArray[Any]`
-assert_type(AR_Any + 2, npt.NDArray[np.signedinteger[Any]])
diff --git a/numpy/typing/tests/data/reveal/fft.pyi b/numpy/typing/tests/data/reveal/fft.pyi
index d6e9ba756d97..f3a29c75615c 100644
--- a/numpy/typing/tests/data/reveal/fft.pyi
+++ b/numpy/typing/tests/data/reveal/fft.pyi
@@ -1,13 +1,9 @@
-import sys
 from typing import Any

 import numpy as np
 import numpy.typing as npt

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 AR_f8: npt.NDArray[np.float64]
 AR_c16: npt.NDArray[np.complex128]
diff --git a/numpy/typing/tests/data/reveal/flatiter.pyi b/numpy/typing/tests/data/reveal/flatiter.pyi
index efbe75cee26a..6891ce7382fe 100644
--- a/numpy/typing/tests/data/reveal/flatiter.pyi
+++ b/numpy/typing/tests/data/reveal/flatiter.pyi
@@ -1,13 +1,9 @@
-import sys
-from typing import Any, Literal, TypeAlias
+from typing import Literal, TypeAlias

 import numpy as np
 import numpy.typing as npt

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 a: np.flatiter[npt.NDArray[np.str_]]
 a_1d: np.flatiter[np.ndarray[tuple[int], np.dtype[np.bytes_]]]
diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi
index 94b3f5e5496d..40bb578d0d46 100644
--- a/numpy/typing/tests/data/reveal/fromnumeric.pyi
+++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi
@@ -1,15 +1,11 @@
 """Tests for :mod:`_core.fromnumeric`."""

-import sys
-from typing import Any
+from typing import Any, Literal as L, NoReturn

 import numpy as np
 import numpy.typing as npt

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 class NDArraySubclass(npt.NDArray[np.complex128]): ...
@@ -21,6 +17,10 @@ AR_u8: npt.NDArray[np.uint64]
 AR_i8: npt.NDArray[np.int64]
 AR_O: npt.NDArray[np.object_]
 AR_subclass: NDArraySubclass
+AR_m: npt.NDArray[np.timedelta64]
+AR_0d: np.ndarray[tuple[()], np.dtype[Any]]
+AR_1d: np.ndarray[tuple[int], np.dtype[Any]]
+AR_nd: np.ndarray[tuple[int, ...], np.dtype[Any]]

 b: np.bool
 f4: np.float32
@@ -37,11 +37,11 @@ assert_type(np.take(AR_f4, [0]), npt.NDArray[np.float32])
 assert_type(np.take([1], [0]), npt.NDArray[Any])
 assert_type(np.take(AR_f4, [0], out=AR_subclass), NDArraySubclass)

-assert_type(np.reshape(b, 1), npt.NDArray[np.bool])
-assert_type(np.reshape(f4, 1), npt.NDArray[np.float32])
-assert_type(np.reshape(f, 1), npt.NDArray[Any])
-assert_type(np.reshape(AR_b, 1), npt.NDArray[np.bool])
-assert_type(np.reshape(AR_f4, 1), npt.NDArray[np.float32])
+assert_type(np.reshape(b, 1), np.ndarray[tuple[int], np.dtype[np.bool]])
+assert_type(np.reshape(f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]])
+assert_type(np.reshape(f, 1), np.ndarray[tuple[int], np.dtype[Any]])
+assert_type(np.reshape(AR_b, 1), np.ndarray[tuple[int], np.dtype[np.bool]])
+assert_type(np.reshape(AR_f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]])

 assert_type(np.choose(1, [True, True]), Any)
 assert_type(np.choose([1], [True, True]), npt.NDArray[Any])
@@ -102,11 +102,11 @@ assert_type(np.searchsorted(AR_f4[0], 0), np.intp)
 assert_type(np.searchsorted(AR_b[0], [0]), npt.NDArray[np.intp])
 assert_type(np.searchsorted(AR_f4[0], [0]), npt.NDArray[np.intp])

-assert_type(np.resize(b, (5, 5)), npt.NDArray[np.bool])
-assert_type(np.resize(f4, (5, 5)), npt.NDArray[np.float32])
-assert_type(np.resize(f, (5, 5)), npt.NDArray[Any])
-assert_type(np.resize(AR_b, (5, 5)), npt.NDArray[np.bool])
-assert_type(np.resize(AR_f4, (5, 5)), npt.NDArray[np.float32])
+assert_type(np.resize(b, (5, 5)), np.ndarray[tuple[L[5], L[5]], np.dtype[np.bool]])
+assert_type(np.resize(f4, (5, 5)), np.ndarray[tuple[L[5], L[5]], np.dtype[np.float32]])
+assert_type(np.resize(f, (5, 5)), np.ndarray[tuple[L[5], L[5]], np.dtype[Any]])
+assert_type(np.resize(AR_b, (5, 5)), np.ndarray[tuple[L[5], L[5]], np.dtype[np.bool]])
+assert_type(np.resize(AR_f4, (5, 5)), np.ndarray[tuple[L[5], L[5]], np.dtype[np.float32]])

 assert_type(np.squeeze(b), np.bool)
 assert_type(np.squeeze(f4), np.float32)
@@ -121,23 +121,31 @@ assert_type(np.trace(AR_b), Any)
 assert_type(np.trace(AR_f4), Any)
 assert_type(np.trace(AR_f4, out=AR_subclass), NDArraySubclass)

-assert_type(np.ravel(b), npt.NDArray[np.bool])
-assert_type(np.ravel(f4), npt.NDArray[np.float32])
-assert_type(np.ravel(f), npt.NDArray[Any])
-assert_type(np.ravel(AR_b), npt.NDArray[np.bool])
-assert_type(np.ravel(AR_f4), npt.NDArray[np.float32])
+assert_type(np.ravel(b), np.ndarray[tuple[int], np.dtype[np.bool]])
+assert_type(np.ravel(f4), np.ndarray[tuple[int], np.dtype[np.float32]])
+assert_type(np.ravel(f), np.ndarray[tuple[int], np.dtype[np.float64 | np.int_ | np.bool]])
+assert_type(np.ravel(AR_b), np.ndarray[tuple[int], np.dtype[np.bool]])
+assert_type(np.ravel(AR_f4), np.ndarray[tuple[int], np.dtype[np.float32]])

-assert_type(np.nonzero(b), tuple[npt.NDArray[np.intp], ...])
-assert_type(np.nonzero(f4), tuple[npt.NDArray[np.intp], ...])
-assert_type(np.nonzero(f), tuple[npt.NDArray[np.intp], ...])
+assert_type(np.nonzero(b), NoReturn)
+assert_type(np.nonzero(f4), NoReturn)
 assert_type(np.nonzero(AR_b), tuple[npt.NDArray[np.intp], ...])
 assert_type(np.nonzero(AR_f4), tuple[npt.NDArray[np.intp], ...])
-
-assert_type(np.shape(b), tuple[int, ...])
-assert_type(np.shape(f4), tuple[int, ...])
-assert_type(np.shape(f), tuple[int, ...])
+assert_type(np.nonzero(AR_0d), NoReturn)
+assert_type(np.nonzero(AR_1d), tuple[npt.NDArray[np.intp], ...])
+assert_type(np.nonzero(AR_nd), tuple[npt.NDArray[np.intp], ...])
+
+assert_type(np.shape(b), tuple[()])
+assert_type(np.shape(f), tuple[()])
+assert_type(np.shape([1]), tuple[int])
+assert_type(np.shape([[2]]), tuple[int, int])
+assert_type(np.shape([[[3]]]), tuple[int, ...])
 assert_type(np.shape(AR_b), tuple[int, ...])
-assert_type(np.shape(AR_f4), tuple[int, ...])
+assert_type(np.shape(AR_nd), tuple[int, ...])
+# these fail on mypy, but work as expected with pyright/pylance
+# assert_type(np.shape(AR_0d), tuple[()])
+# assert_type(np.shape(AR_1d), tuple[int])
+# assert_type(np.shape(AR_2d), tuple[int, int])

 assert_type(np.compress([True], b), npt.NDArray[np.bool])
 assert_type(np.compress([True], f4), npt.NDArray[np.float32])
@@ -161,6 +169,12 @@ assert_type(np.sum(AR_f4), np.float32)
 assert_type(np.sum(AR_b, axis=0), Any)
 assert_type(np.sum(AR_f4, axis=0), Any)
 assert_type(np.sum(AR_f4, out=AR_subclass), NDArraySubclass)
+assert_type(np.sum(AR_f4, dtype=np.float64), np.float64)
+assert_type(np.sum(AR_f4, None, np.float64), np.float64)
+assert_type(np.sum(AR_f4, dtype=np.float64, keepdims=False), np.float64)
+assert_type(np.sum(AR_f4, None, np.float64, keepdims=False), np.float64)
+assert_type(np.sum(AR_f4, dtype=np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64])
+assert_type(np.sum(AR_f4, None, np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64])

 assert_type(np.all(b), np.bool)
 assert_type(np.all(f4), np.bool)
@@ -294,6 +308,7 @@ assert_type(np.around(AR_f4, out=AR_subclass), NDArraySubclass)
 assert_type(np.mean(AR_b), np.floating[Any])
 assert_type(np.mean(AR_i8), np.floating[Any])
 assert_type(np.mean(AR_f4), np.floating[Any])
+assert_type(np.mean(AR_m), np.timedelta64)
 assert_type(np.mean(AR_c16), np.complexfloating[Any, Any])
 assert_type(np.mean(AR_O), Any)
 assert_type(np.mean(AR_f4, axis=0), Any)
@@ -301,6 +316,12 @@ assert_type(np.mean(AR_f4, keepdims=True), Any)
 assert_type(np.mean(AR_f4, dtype=float), Any)
 assert_type(np.mean(AR_f4, dtype=np.float64), np.float64)
 assert_type(np.mean(AR_f4, out=AR_subclass), NDArraySubclass)
+assert_type(np.mean(AR_f4, dtype=np.float64), np.float64)
+assert_type(np.mean(AR_f4, None, np.float64), np.float64)
+assert_type(np.mean(AR_f4, dtype=np.float64, keepdims=False), np.float64)
+assert_type(np.mean(AR_f4, None, np.float64, keepdims=False), np.float64)
+assert_type(np.mean(AR_f4, dtype=np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64])
+assert_type(np.mean(AR_f4, None, np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64])

 assert_type(np.std(AR_b), np.floating[Any])
 assert_type(np.std(AR_i8), np.floating[Any])
diff --git a/numpy/typing/tests/data/reveal/getlimits.pyi b/numpy/typing/tests/data/reveal/getlimits.pyi
index 57af90cccb8a..f058382f2042 100644
--- a/numpy/typing/tests/data/reveal/getlimits.pyi
+++ b/numpy/typing/tests/data/reveal/getlimits.pyi
@@ -1,12 +1,9 @@
-import sys
 from typing import Any

 import numpy as np
+from numpy._typing import _64Bit

-if sys.version_info >= (3, 11):
-    from typing import assert_type, LiteralString
-else:
-    from typing_extensions import assert_type, LiteralString
+from typing_extensions import assert_type, LiteralString

 f: float
 f8: np.float64
@@ -19,8 +16,8 @@ u4: np.uint32
 finfo_f8: np.finfo[np.float64]
 iinfo_i8: np.iinfo[np.int64]

-assert_type(np.finfo(f), np.finfo[np.double])
-assert_type(np.finfo(f8), np.finfo[np.float64])
+assert_type(np.finfo(f), np.finfo[np.float64])
+assert_type(np.finfo(f8), np.finfo[np.floating[_64Bit]])
 assert_type(np.finfo(c8), np.finfo[np.float32])
 assert_type(np.finfo('f2'), np.finfo[np.floating[Any]])
diff --git a/numpy/typing/tests/data/reveal/histograms.pyi b/numpy/typing/tests/data/reveal/histograms.pyi
index 67067eb7d63f..91a7d0394d20 100644
--- a/numpy/typing/tests/data/reveal/histograms.pyi
+++ b/numpy/typing/tests/data/reveal/histograms.pyi
@@ -1,13 +1,9 @@
-import sys
 from typing import Any

 import numpy as np
 import numpy.typing as npt

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 AR_i8: npt.NDArray[np.int64]
 AR_f8: npt.NDArray[np.float64]
diff --git a/numpy/typing/tests/data/reveal/index_tricks.pyi b/numpy/typing/tests/data/reveal/index_tricks.pyi
index ad8be765fbc1..1db10928d2f5 100644
--- a/numpy/typing/tests/data/reveal/index_tricks.pyi
+++ b/numpy/typing/tests/data/reveal/index_tricks.pyi
@@ -1,13 +1,10 @@
-import sys
+from types import EllipsisType
 from typing import Any, Literal

 import numpy as np
 import numpy.typing as npt

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 AR_LIKE_b: list[bool]
 AR_LIKE_i: list[int]
@@ -61,13 +58,13 @@ assert_type(np.mgrid[1:1:2, None:10], npt.NDArray[Any])
 assert_type(np.ogrid[1:1:2], tuple[npt.NDArray[Any], ...])
 assert_type(np.ogrid[1:1:2, None:10], tuple[npt.NDArray[Any], ...])

-assert_type(np.index_exp[0:1], tuple[slice])
-assert_type(np.index_exp[0:1, None:3], tuple[slice, slice])
-assert_type(np.index_exp[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice, ellipsis, list[int]])
+assert_type(np.index_exp[0:1], tuple[slice[int, int, None]])
+assert_type(np.index_exp[0:1, None:3], tuple[slice[int, int, None], slice[None, int, None]])
+assert_type(np.index_exp[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice[int, int, None], EllipsisType, list[int]])

-assert_type(np.s_[0:1], slice)
-assert_type(np.s_[0:1, None:3], tuple[slice, slice])
-assert_type(np.s_[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice, ellipsis, list[int]])
+assert_type(np.s_[0:1], slice[int, int, None])
+assert_type(np.s_[0:1, None:3], tuple[slice[int, int, None], slice[None, int, None]])
+assert_type(np.s_[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice[int, int, None], EllipsisType, list[int]])

 assert_type(np.ix_(AR_LIKE_b), tuple[npt.NDArray[np.bool], ...])
 assert_type(np.ix_(AR_LIKE_i, AR_LIKE_f), tuple[npt.NDArray[np.float64], ...])
diff --git a/numpy/typing/tests/data/reveal/lib_function_base.pyi b/numpy/typing/tests/data/reveal/lib_function_base.pyi
index b630a130633a..9cd06a36f3e0 100644
--- a/numpy/typing/tests/data/reveal/lib_function_base.pyi
+++ b/numpy/typing/tests/data/reveal/lib_function_base.pyi
@@ -1,4 +1,3 @@
-import sys
 from fractions import Fraction
 from typing import Any
 from collections.abc import Callable
@@ -6,10 +5,7 @@
 import numpy as np
 import numpy.typing as npt

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 vectorized_func: np.vectorize
@@ -26,9 +22,18 @@ AR_M: npt.NDArray[np.datetime64]
 AR_O: npt.NDArray[np.object_]
 AR_b: npt.NDArray[np.bool]
 AR_U: npt.NDArray[np.str_]
-CHAR_AR_U: np.char.chararray[Any, np.dtype[np.str_]]
+CHAR_AR_U: np.char.chararray[tuple[int, ...], np.dtype[np.str_]]

-def func(*args: Any, **kwargs: Any) -> Any: ...
+AR_b_list: list[npt.NDArray[np.bool]]
+
+def func(
+    a: npt.NDArray[Any],
+    posarg: bool = ...,
+    /,
+    arg: int = ...,
+    *,
+    kwarg: str = ...,
+) -> npt.NDArray[Any]: ...

 assert_type(vectorized_func.pyfunc, Callable[..., Any])
 assert_type(vectorized_func.cache, bool)
@@ -69,15 +74,18 @@ assert_type(np.asarray_chkfinite(AR_f8, dtype=np.float64), npt.NDArray[np.float64])
 assert_type(np.asarray_chkfinite(AR_f8, dtype=float), npt.NDArray[Any])

 assert_type(np.piecewise(AR_f8, AR_b, [func]), npt.NDArray[np.float64])
-assert_type(np.piecewise(AR_LIKE_f8, AR_b, [func]), npt.NDArray[Any])
+assert_type(np.piecewise(AR_f8, AR_b_list, [func]), npt.NDArray[np.float64])
+assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, -1, kwarg=''), npt.NDArray[np.float64])
+assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, arg=-1, kwarg=''), npt.NDArray[np.float64])
+assert_type(np.piecewise(AR_LIKE_f8, AR_b_list, [func]), npt.NDArray[Any])

 assert_type(np.select([AR_f8], [AR_f8]), npt.NDArray[Any])

 assert_type(np.copy(AR_LIKE_f8), npt.NDArray[Any])
 assert_type(np.copy(AR_U), npt.NDArray[np.str_])
-assert_type(np.copy(CHAR_AR_U), np.ndarray[Any, Any])
-assert_type(np.copy(CHAR_AR_U, "K", subok=True), np.char.chararray[Any, np.dtype[np.str_]])
-assert_type(np.copy(CHAR_AR_U, subok=True), np.char.chararray[Any, np.dtype[np.str_]])
+assert_type(np.copy(CHAR_AR_U), npt.NDArray[np.str_])
+assert_type(np.copy(CHAR_AR_U, "K", subok=True), np.char.chararray[tuple[int, ...], np.dtype[np.str_]])
+assert_type(np.copy(CHAR_AR_U, subok=True), np.char.chararray[tuple[int, ...], np.dtype[np.str_]])

 assert_type(np.gradient(AR_f8, axis=None), Any)
 assert_type(np.gradient(AR_LIKE_f8, edge_order=2), Any)
@@ -86,6 +94,15 @@ assert_type(np.diff("bob", n=0), str)
 assert_type(np.diff(AR_f8, axis=0), npt.NDArray[Any])
 assert_type(np.diff(AR_LIKE_f8, prepend=1.5), npt.NDArray[Any])

+assert_type(np.interp(1, [1], AR_f8), np.float64)
+assert_type(np.interp(1, [1], [1]), np.float64)
+assert_type(np.interp(1, [1], AR_c16), np.complex128)
+assert_type(np.interp(1, [1], [1j]), np.complex128)  # pyright correctly infers `complex128 | float64`
+assert_type(np.interp([1], [1], AR_f8), npt.NDArray[np.float64])
+assert_type(np.interp([1], [1], [1]), npt.NDArray[np.float64])
+assert_type(np.interp([1], [1], AR_c16), npt.NDArray[np.complex128])
+assert_type(np.interp([1], [1], [1j]), npt.NDArray[np.complex128])  # pyright correctly infers `NDArray[complex128 | float64]`
+
 assert_type(np.angle(f8), np.floating[Any])
 assert_type(np.angle(AR_f8), npt.NDArray[np.floating[Any]])
 assert_type(np.angle(AR_c16, deg=True), npt.NDArray[np.floating[Any]])
diff --git a/numpy/typing/tests/data/reveal/lib_polynomial.pyi b/numpy/typing/tests/data/reveal/lib_polynomial.pyi
index 885b40ee80a4..d41b1d56b75a 100644
--- a/numpy/typing/tests/data/reveal/lib_polynomial.pyi
+++ b/numpy/typing/tests/data/reveal/lib_polynomial.pyi
@@ -1,14 +1,10 @@
-import sys
 from typing import Any, NoReturn
 from collections.abc import Iterator

 import numpy as np
 import numpy.typing as npt

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 AR_b: npt.NDArray[np.bool]
AR_u4: npt.NDArray[np.uint32] diff --git a/numpy/typing/tests/data/reveal/lib_utils.pyi b/numpy/typing/tests/data/reveal/lib_utils.pyi index 094b60140833..44ae59234c42 100644 --- a/numpy/typing/tests/data/reveal/lib_utils.pyi +++ b/numpy/typing/tests/data/reveal/lib_utils.pyi @@ -1,14 +1,10 @@ -import sys from io import StringIO import numpy as np import numpy.typing as npt import numpy.lib.array_utils as array_utils -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +from typing_extensions import assert_type AR: npt.NDArray[np.float64] AR_DICT: dict[str, npt.NDArray[np.float64]] diff --git a/numpy/typing/tests/data/reveal/lib_version.pyi b/numpy/typing/tests/data/reveal/lib_version.pyi index 142d88bdbb8a..52c1218e9dfd 100644 --- a/numpy/typing/tests/data/reveal/lib_version.pyi +++ b/numpy/typing/tests/data/reveal/lib_version.pyi @@ -1,11 +1,6 @@ -import sys - from numpy.lib import NumpyVersion -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +from typing_extensions import assert_type version = NumpyVersion("1.8.0") diff --git a/numpy/typing/tests/data/reveal/linalg.pyi b/numpy/typing/tests/data/reveal/linalg.pyi index 8d594d42c3c1..f9aaa71ef4bc 100644 --- a/numpy/typing/tests/data/reveal/linalg.pyi +++ b/numpy/typing/tests/data/reveal/linalg.pyi @@ -1,4 +1,3 @@ -import sys from typing import Any import numpy as np @@ -7,10 +6,7 @@ from numpy.linalg._linalg import ( QRResult, EigResult, EighResult, SVDResult, SlogdetResult ) -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +from typing_extensions import assert_type AR_i8: npt.NDArray[np.int64] AR_f8: npt.NDArray[np.float64] diff --git a/numpy/typing/tests/data/reveal/matrix.pyi b/numpy/typing/tests/data/reveal/matrix.pyi index 1a0aa4e3c7b4..28a2531b4db2 100644 --- a/numpy/typing/tests/data/reveal/matrix.pyi +++ b/numpy/typing/tests/data/reveal/matrix.pyi @@ -1,22 +1,20 @@ -import sys -from typing import Any +from typing import Any, TypeAlias import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +from typing_extensions import assert_type -mat: np.matrix[Any, np.dtype[np.int64]] +_Shape2D: TypeAlias = tuple[int, int] + +mat: np.matrix[_Shape2D, np.dtype[np.int64]] ar_f8: npt.NDArray[np.float64] -assert_type(mat * 5, np.matrix[Any, Any]) -assert_type(5 * mat, np.matrix[Any, Any]) +assert_type(mat * 5, np.matrix[_Shape2D, Any]) +assert_type(5 * mat, np.matrix[_Shape2D, Any]) mat *= 5 -assert_type(mat**5, np.matrix[Any, Any]) +assert_type(mat**5, np.matrix[_Shape2D, Any]) mat **= 5 assert_type(mat.sum(), Any) @@ -32,18 +30,18 @@ assert_type(mat.argmax(), np.intp) assert_type(mat.argmin(), np.intp) assert_type(mat.ptp(), np.int64) -assert_type(mat.sum(axis=0), np.matrix[Any, Any]) -assert_type(mat.mean(axis=0), np.matrix[Any, Any]) -assert_type(mat.std(axis=0), np.matrix[Any, Any]) -assert_type(mat.var(axis=0), np.matrix[Any, Any]) -assert_type(mat.prod(axis=0), np.matrix[Any, Any]) -assert_type(mat.any(axis=0), np.matrix[Any, np.dtype[np.bool]]) -assert_type(mat.all(axis=0), np.matrix[Any, np.dtype[np.bool]]) -assert_type(mat.max(axis=0), np.matrix[Any, np.dtype[np.int64]]) -assert_type(mat.min(axis=0), np.matrix[Any, np.dtype[np.int64]]) -assert_type(mat.argmax(axis=0), np.matrix[Any, np.dtype[np.intp]]) -assert_type(mat.argmin(axis=0), 
np.matrix[Any, np.dtype[np.intp]]) -assert_type(mat.ptp(axis=0), np.matrix[Any, np.dtype[np.int64]]) +assert_type(mat.sum(axis=0), np.matrix[_Shape2D, Any]) +assert_type(mat.mean(axis=0), np.matrix[_Shape2D, Any]) +assert_type(mat.std(axis=0), np.matrix[_Shape2D, Any]) +assert_type(mat.var(axis=0), np.matrix[_Shape2D, Any]) +assert_type(mat.prod(axis=0), np.matrix[_Shape2D, Any]) +assert_type(mat.any(axis=0), np.matrix[_Shape2D, np.dtype[np.bool]]) +assert_type(mat.all(axis=0), np.matrix[_Shape2D, np.dtype[np.bool]]) +assert_type(mat.max(axis=0), np.matrix[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.min(axis=0), np.matrix[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.argmax(axis=0), np.matrix[_Shape2D, np.dtype[np.intp]]) +assert_type(mat.argmin(axis=0), np.matrix[_Shape2D, np.dtype[np.intp]]) +assert_type(mat.ptp(axis=0), np.matrix[_Shape2D, np.dtype[np.int64]]) assert_type(mat.sum(out=ar_f8), npt.NDArray[np.float64]) assert_type(mat.mean(out=ar_f8), npt.NDArray[np.float64]) @@ -58,19 +56,19 @@ assert_type(mat.argmax(out=ar_f8), npt.NDArray[np.float64]) assert_type(mat.argmin(out=ar_f8), npt.NDArray[np.float64]) assert_type(mat.ptp(out=ar_f8), npt.NDArray[np.float64]) -assert_type(mat.T, np.matrix[Any, np.dtype[np.int64]]) -assert_type(mat.I, np.matrix[Any, Any]) -assert_type(mat.A, npt.NDArray[np.int64]) +assert_type(mat.T, np.matrix[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.I, np.matrix[_Shape2D, Any]) +assert_type(mat.A, np.ndarray[_Shape2D, np.dtype[np.int64]]) assert_type(mat.A1, npt.NDArray[np.int64]) -assert_type(mat.H, np.matrix[Any, np.dtype[np.int64]]) -assert_type(mat.getT(), np.matrix[Any, np.dtype[np.int64]]) -assert_type(mat.getI(), np.matrix[Any, Any]) -assert_type(mat.getA(), npt.NDArray[np.int64]) +assert_type(mat.H, np.matrix[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.getT(), np.matrix[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.getI(), np.matrix[_Shape2D, Any]) +assert_type(mat.getA(), np.ndarray[_Shape2D, np.dtype[np.int64]]) assert_type(mat.getA1(), npt.NDArray[np.int64]) -assert_type(mat.getH(), np.matrix[Any, np.dtype[np.int64]]) +assert_type(mat.getH(), np.matrix[_Shape2D, np.dtype[np.int64]]) -assert_type(np.bmat(ar_f8), np.matrix[Any, Any]) -assert_type(np.bmat([[0, 1, 2]]), np.matrix[Any, Any]) -assert_type(np.bmat("mat"), np.matrix[Any, Any]) +assert_type(np.bmat(ar_f8), np.matrix[_Shape2D, Any]) +assert_type(np.bmat([[0, 1, 2]]), np.matrix[_Shape2D, Any]) +assert_type(np.bmat("mat"), np.matrix[_Shape2D, Any]) -assert_type(np.asmatrix(ar_f8, dtype=np.int64), np.matrix[Any, Any]) +assert_type(np.asmatrix(ar_f8, dtype=np.int64), np.matrix[_Shape2D, Any]) diff --git a/numpy/typing/tests/data/reveal/memmap.pyi b/numpy/typing/tests/data/reveal/memmap.pyi index 53278ff1122b..b1f985382c6b 100644 --- a/numpy/typing/tests/data/reveal/memmap.pyi +++ b/numpy/typing/tests/data/reveal/memmap.pyi @@ -1,12 +1,8 @@ -import sys from typing import Any import numpy as np -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +from typing_extensions import assert_type memmap_obj: np.memmap[Any, np.dtype[np.str_]] diff --git a/numpy/typing/tests/data/reveal/mod.pyi b/numpy/typing/tests/data/reveal/mod.pyi index 11cdeb2a4273..db79504fdd1f 100644 --- a/numpy/typing/tests/data/reveal/mod.pyi +++ b/numpy/typing/tests/data/reveal/mod.pyi @@ -1,42 +1,71 @@ -import sys -from typing import Any +import datetime as dt +from typing import Literal as L + +from typing_extensions import assert_type import numpy as 
np import numpy.typing as npt -from numpy._typing import _32Bit, _64Bit +from numpy._typing import _64Bit -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +f8: np.float64 +i8: np.int64 +u8: np.uint64 -f8 = np.float64() -i8 = np.int64() -u8 = np.uint64() +f4: np.float32 +i4: np.int32 +u4: np.uint32 -f4 = np.float32() -i4 = np.int32() -u4 = np.uint32() +m: np.timedelta64 +m_nat: np.timedelta64[None] +m_int0: np.timedelta64[L[0]] +m_int: np.timedelta64[int] +m_td: np.timedelta64[dt.timedelta] -td = np.timedelta64(0, "D") -b_ = np.bool() +b_: np.bool -b = bool() -f = float() -i = int() +b: bool +i: int +f: float AR_b: npt.NDArray[np.bool] AR_m: npt.NDArray[np.timedelta64] # Time structures -assert_type(td % td, np.timedelta64) -assert_type(AR_m % td, npt.NDArray[np.timedelta64]) -assert_type(td % AR_m, npt.NDArray[np.timedelta64]) - -assert_type(divmod(td, td), tuple[np.int64, np.timedelta64]) -assert_type(divmod(AR_m, td), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) -assert_type(divmod(td, AR_m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) +assert_type(m % m, np.timedelta64) +assert_type(m % m_nat, np.timedelta64[None]) +assert_type(m % m_int0, np.timedelta64[None]) +assert_type(m % m_int, np.timedelta64[int | None]) +assert_type(m_nat % m, np.timedelta64[None]) +assert_type(m_int % m_nat, np.timedelta64[None]) +assert_type(m_int % m_int0, np.timedelta64[None]) +assert_type(m_int % m_int, np.timedelta64[int | None]) +assert_type(m_int % m_td, np.timedelta64[int | None]) +assert_type(m_td % m_nat, np.timedelta64[None]) +assert_type(m_td % m_int0, np.timedelta64[None]) +assert_type(m_td % m_int, np.timedelta64[int | None]) +assert_type(m_td % m_td, np.timedelta64[dt.timedelta | None]) + +assert_type(AR_m % m, npt.NDArray[np.timedelta64]) +assert_type(m % AR_m, npt.NDArray[np.timedelta64]) + +assert_type(divmod(m, m), tuple[np.int64, np.timedelta64]) +assert_type(divmod(m, m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m, m_int0), tuple[np.int64, np.timedelta64[None]]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(m.__divmod__(m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(divmod(m_nat, m), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_int, m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_int, m_int0), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_int, m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(divmod(m_int, m_td), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(divmod(m_td, m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_td, m_int0), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_td, m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(divmod(m_td, m_td), tuple[np.int64, np.timedelta64[dt.timedelta | None]]) + +assert_type(divmod(AR_m, m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) +assert_type(divmod(m, AR_m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) # Bool @@ -50,11 +79,12 @@ assert_type(b_ % f8, np.float64) assert_type(b_ % AR_b, npt.NDArray[np.int8]) assert_type(divmod(b_, b), tuple[np.int8, np.int8]) -assert_type(divmod(b_, i), tuple[np.int_, np.int_]) -assert_type(divmod(b_, f), tuple[np.float64, np.float64]) assert_type(divmod(b_, b_), tuple[np.int8, np.int8]) -assert_type(divmod(b_, i8), tuple[np.int64, np.int64]) -assert_type(divmod(b_, u8), 
tuple[np.uint64, np.uint64]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(b_.__divmod__(i), tuple[np.int_, np.int_]) +assert_type(b_.__divmod__(f), tuple[np.float64, np.float64]) +assert_type(b_.__divmod__(i8), tuple[np.int64, np.int64]) +assert_type(b_.__divmod__(u8), tuple[np.uint64, np.uint64]) assert_type(divmod(b_, f8), tuple[np.float64, np.float64]) assert_type(divmod(b_, AR_b), tuple[npt.NDArray[np.int8], npt.NDArray[np.int8]]) @@ -79,70 +109,73 @@ assert_type(divmod(AR_b, b_), tuple[npt.NDArray[np.int8], npt.NDArray[np.int8]]) # int assert_type(i8 % b, np.int64) -assert_type(i8 % f, np.float64) assert_type(i8 % i8, np.int64) -assert_type(i8 % f8, np.float64) -assert_type(i4 % i8, np.signedinteger[_32Bit | _64Bit]) -assert_type(i4 % f8, np.floating[_32Bit | _64Bit]) +assert_type(i8 % f, np.float64 | np.floating[_64Bit]) +assert_type(i8 % f8, np.float64 | np.floating[_64Bit]) +assert_type(i4 % i8, np.int64 | np.int32) +assert_type(i4 % f8, np.float64 | np.float32) assert_type(i4 % i4, np.int32) assert_type(i4 % f4, np.float32) -assert_type(i8 % AR_b, npt.NDArray[np.signedinteger[Any]]) +assert_type(i8 % AR_b, npt.NDArray[np.int64]) assert_type(divmod(i8, b), tuple[np.int64, np.int64]) -assert_type(divmod(i8, f), tuple[np.float64, np.float64]) +assert_type(divmod(i8, i4), tuple[np.int64, np.int64] | tuple[np.int32, np.int32]) assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) -assert_type(divmod(i8, f8), tuple[np.float64, np.float64]) -assert_type(divmod(i8, i4), tuple[np.signedinteger[_32Bit | _64Bit], np.signedinteger[_32Bit | _64Bit]]) -assert_type(divmod(i8, f4), tuple[np.floating[_32Bit | _64Bit], np.floating[_32Bit | _64Bit]]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(i8.__divmod__(f), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float64, np.float64]) +assert_type(i8.__divmod__(f8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float64, np.float64]) +assert_type(divmod(i8, f4), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float32, np.float32]) assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) assert_type(divmod(i4, f4), tuple[np.float32, np.float32]) -assert_type(divmod(i8, AR_b), tuple[npt.NDArray[np.signedinteger[Any]], npt.NDArray[np.signedinteger[Any]]]) +assert_type(divmod(i8, AR_b), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) assert_type(b % i8, np.int64) -assert_type(f % i8, np.float64) +assert_type(f % i8, np.float64 | np.floating[_64Bit]) assert_type(i8 % i8, np.int64) assert_type(f8 % i8, np.float64) -assert_type(i8 % i4, np.signedinteger[_32Bit | _64Bit]) -assert_type(f8 % i4, np.floating[_32Bit | _64Bit]) +assert_type(i8 % i4, np.int64 | np.int32) +assert_type(f8 % i4, np.float64) assert_type(i4 % i4, np.int32) assert_type(f4 % i4, np.float32) -assert_type(AR_b % i8, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_b % i8, npt.NDArray[np.int64]) assert_type(divmod(b, i8), tuple[np.int64, np.int64]) -assert_type(divmod(f, i8), tuple[np.float64, np.float64]) +assert_type(divmod(f, i8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float64, np.float64]) assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) assert_type(divmod(f8, i8), tuple[np.float64, np.float64]) -assert_type(divmod(i4, i8), tuple[np.signedinteger[_32Bit | _64Bit], np.signedinteger[_32Bit | _64Bit]]) -assert_type(divmod(f4, i8), tuple[np.floating[_32Bit | _64Bit], np.floating[_32Bit | _64Bit]]) +assert_type(divmod(i4, i8), tuple[np.int64, np.int64] | 
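# Illustrative sketch of the `__divmod__` workaround used above (an editorial
# aside, not part of the diff; assumes an installed NumPy). Calling the dunder
# directly is runtime-identical to the `divmod()` builtin; only pyright's
# overload resolution path differs, which is what the linked issue covers:
import numpy as np

i8 = np.int64(7)
assert i8.__divmod__(2.5) == divmod(i8, 2.5) == (2.0, 2.0)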
tuple[np.int32, np.int32]) assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) -assert_type(divmod(f4, i4), tuple[np.float32, np.float32]) -assert_type(divmod(AR_b, i8), tuple[npt.NDArray[np.signedinteger[Any]], npt.NDArray[np.signedinteger[Any]]]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(f4.__divmod__(i8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float32, np.float32]) +assert_type(f4.__divmod__(i4), tuple[np.float32, np.float32]) +assert_type(AR_b.__divmod__(i8), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) # float assert_type(f8 % b, np.float64) assert_type(f8 % f, np.float64) -assert_type(i8 % f4, np.floating[_32Bit | _64Bit]) +assert_type(i8 % f4, np.floating[_64Bit] | np.float32) assert_type(f4 % f4, np.float32) -assert_type(f8 % AR_b, npt.NDArray[np.floating[Any]]) +assert_type(f8 % AR_b, npt.NDArray[np.float64]) assert_type(divmod(f8, b), tuple[np.float64, np.float64]) assert_type(divmod(f8, f), tuple[np.float64, np.float64]) assert_type(divmod(f8, f8), tuple[np.float64, np.float64]) -assert_type(divmod(f8, f4), tuple[np.floating[_32Bit | _64Bit], np.floating[_32Bit | _64Bit]]) +assert_type(divmod(f8, f4), tuple[np.float64, np.float64]) assert_type(divmod(f4, f4), tuple[np.float32, np.float32]) -assert_type(divmod(f8, AR_b), tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]) +assert_type(divmod(f8, AR_b), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) assert_type(b % f8, np.float64) -assert_type(f % f8, np.float64) +assert_type(f % f8, np.float64) # pyright: ignore[reportAssertTypeFailure] # pyright incorrectly infers `builtins.float` assert_type(f8 % f8, np.float64) assert_type(f8 % f8, np.float64) assert_type(f4 % f4, np.float32) -assert_type(AR_b % f8, npt.NDArray[np.floating[Any]]) +assert_type(AR_b % f8, npt.NDArray[np.float64]) assert_type(divmod(b, f8), tuple[np.float64, np.float64]) -assert_type(divmod(f, f8), tuple[np.float64, np.float64]) assert_type(divmod(f8, f8), tuple[np.float64, np.float64]) -assert_type(divmod(f4, f8), tuple[np.floating[_32Bit | _64Bit], np.floating[_32Bit | _64Bit]]) assert_type(divmod(f4, f4), tuple[np.float32, np.float32]) -assert_type(divmod(AR_b, f8), tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(f8.__rdivmod__(f), tuple[np.float64, np.float64]) +assert_type(f8.__rdivmod__(f4), tuple[np.float64, np.float64]) +assert_type(AR_b.__divmod__(f8), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) diff --git a/numpy/typing/tests/data/reveal/modules.pyi b/numpy/typing/tests/data/reveal/modules.pyi index 1ab01cd079c2..1e4e895bf5f8 100644 --- a/numpy/typing/tests/data/reveal/modules.pyi +++ b/numpy/typing/tests/data/reveal/modules.pyi @@ -1,13 +1,9 @@ -import sys import types import numpy as np from numpy import f2py -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +from typing_extensions import assert_type assert_type(np, types.ModuleType) diff --git a/numpy/typing/tests/data/reveal/multiarray.pyi b/numpy/typing/tests/data/reveal/multiarray.pyi index 085c5ff568be..cae14ee57e22 100644 --- a/numpy/typing/tests/data/reveal/multiarray.pyi +++ b/numpy/typing/tests/data/reveal/multiarray.pyi @@ -1,14 +1,10 @@ -import sys import datetime as dt -from typing import Any, TypeVar +from typing import Any, Literal, TypeVar import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 
11): - from typing import assert_type -else: - from typing_extensions import assert_type +from typing_extensions import Unpack, assert_type _SCT = TypeVar("_SCT", bound=np.generic, covariant=True) @@ -37,7 +33,15 @@ date_scalar: dt.date date_seq: list[dt.date] timedelta_seq: list[dt.timedelta] -def func(a: int) -> bool: ... +n1: Literal[1] +n2: Literal[2] +n3: Literal[3] + +f8: np.float64 + +def func11(a: int) -> bool: ... +def func21(a: int, b: int) -> int: ... +def func12(a: int) -> tuple[complex, bool]: ... assert_type(next(b_f8), tuple[Any, ...]) assert_type(b_f8.reset(), None) @@ -106,7 +110,56 @@ assert_type(np.may_share_memory(AR_f8, AR_f8, max_work=1), bool) assert_type(np.promote_types(np.int32, np.int64), np.dtype[Any]) assert_type(np.promote_types("f4", float), np.dtype[Any]) -assert_type(np.frompyfunc(func, 1, 1, identity=None), np.ufunc) +assert_type(np.frompyfunc(func11, n1, n1).nin, Literal[1]) +assert_type(np.frompyfunc(func11, n1, n1).nout, Literal[1]) +assert_type(np.frompyfunc(func11, n1, n1).nargs, Literal[2]) +assert_type(np.frompyfunc(func11, n1, n1).ntypes, Literal[1]) +assert_type(np.frompyfunc(func11, n1, n1).identity, None) +assert_type(np.frompyfunc(func11, n1, n1).signature, None) +assert_type(np.frompyfunc(func11, n1, n1)(f8), bool) +assert_type(np.frompyfunc(func11, n1, n1)(AR_f8), bool | npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func11, n1, n1).at(AR_f8, AR_i8), None) + +assert_type(np.frompyfunc(func21, n2, n1).nin, Literal[2]) +assert_type(np.frompyfunc(func21, n2, n1).nout, Literal[1]) +assert_type(np.frompyfunc(func21, n2, n1).nargs, Literal[3]) +assert_type(np.frompyfunc(func21, n2, n1).ntypes, Literal[1]) +assert_type(np.frompyfunc(func21, n2, n1).identity, None) +assert_type(np.frompyfunc(func21, n2, n1).signature, None) +assert_type(np.frompyfunc(func21, n2, n1)(f8, f8), int) +assert_type(np.frompyfunc(func21, n2, n1)(AR_f8, f8), int | npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func21, n2, n1)(f8, AR_f8), int | npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func21, n2, n1).reduce(AR_f8, axis=0), int | npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func21, n2, n1).accumulate(AR_f8), npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func21, n2, n1).reduceat(AR_f8, AR_i8), npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func21, n2, n1).outer(f8, f8), int) +assert_type(np.frompyfunc(func21, n2, n1).outer(AR_f8, f8), int | npt.NDArray[np.object_]) + +assert_type(np.frompyfunc(func21, n2, n1, identity=0).nin, Literal[2]) +assert_type(np.frompyfunc(func21, n2, n1, identity=0).nout, Literal[1]) +assert_type(np.frompyfunc(func21, n2, n1, identity=0).nargs, Literal[3]) +assert_type(np.frompyfunc(func21, n2, n1, identity=0).ntypes, Literal[1]) +assert_type(np.frompyfunc(func21, n2, n1, identity=0).identity, int) +assert_type(np.frompyfunc(func21, n2, n1, identity=0).signature, None) + +assert_type(np.frompyfunc(func12, n1, n2).nin, Literal[1]) +assert_type(np.frompyfunc(func12, n1, n2).nout, Literal[2]) +assert_type(np.frompyfunc(func12, n1, n2).nargs, int) +assert_type(np.frompyfunc(func12, n1, n2).ntypes, Literal[1]) +assert_type(np.frompyfunc(func12, n1, n2).identity, None) +assert_type(np.frompyfunc(func12, n1, n2).signature, None) +assert_type( + np.frompyfunc(func12, n2, n2)(f8, f8), + tuple[complex, complex, Unpack[tuple[complex, ...]]], +) +assert_type( + np.frompyfunc(func12, n2, n2)(AR_f8, f8), + tuple[ + complex | npt.NDArray[np.object_], + complex | npt.NDArray[np.object_], + Unpack[tuple[complex | 
npt.NDArray[np.object_], ...]], + ], +) assert_type(np.datetime_data("m8[D]"), tuple[str, int]) assert_type(np.datetime_data(np.datetime64), tuple[str, int]) diff --git a/numpy/typing/tests/data/reveal/nbit_base_example.pyi b/numpy/typing/tests/data/reveal/nbit_base_example.pyi index ac2eb1d25323..add031ac884a 100644 --- a/numpy/typing/tests/data/reveal/nbit_base_example.pyi +++ b/numpy/typing/tests/data/reveal/nbit_base_example.pyi @@ -1,14 +1,10 @@ -import sys from typing import TypeVar import numpy as np import numpy.typing as npt from numpy._typing import _64Bit, _32Bit -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +from typing_extensions import assert_type T1 = TypeVar("T1", bound=npt.NBitBase) T2 = TypeVar("T2", bound=npt.NBitBase) @@ -21,7 +17,7 @@ i4: np.int32 f8: np.float64 f4: np.float32 -assert_type(add(f8, i8), np.float64) +assert_type(add(f8, i8), np.floating[_64Bit]) assert_type(add(f4, i8), np.floating[_32Bit | _64Bit]) assert_type(add(f8, i4), np.floating[_32Bit | _64Bit]) -assert_type(add(f4, i4), np.float32) +assert_type(add(f4, i4), np.floating[_32Bit]) diff --git a/numpy/typing/tests/data/reveal/ndarray_assignability.pyi b/numpy/typing/tests/data/reveal/ndarray_assignability.pyi new file mode 100644 index 000000000000..22f0d005a7d2 --- /dev/null +++ b/numpy/typing/tests/data/reveal/ndarray_assignability.pyi @@ -0,0 +1,79 @@ +from typing import Protocol, TypeAlias, TypeVar +from typing_extensions import assert_type +import numpy as np + +from numpy._typing import _64Bit + + +_T = TypeVar("_T") +_T_co = TypeVar("_T_co", covariant=True) + +class CanAbs(Protocol[_T_co]): + def __abs__(self, /) -> _T_co: ... + +class CanInvert(Protocol[_T_co]): + def __invert__(self, /) -> _T_co: ... + +class CanNeg(Protocol[_T_co]): + def __neg__(self, /) -> _T_co: ... + +class CanPos(Protocol[_T_co]): + def __pos__(self, /) -> _T_co: ... + +def do_abs(x: CanAbs[_T]) -> _T: ... +def do_invert(x: CanInvert[_T]) -> _T: ... +def do_neg(x: CanNeg[_T]) -> _T: ... +def do_pos(x: CanPos[_T]) -> _T: ... 
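# Illustrative sketch of the single-method-protocol technique defined above
# (an editorial aside, not part of the diff; `CanRound`, `do_round`, and
# `Meters` are made-up names). Because each protocol exposes exactly one
# dunder, the checker must solve the type variable from that dunder's return
# type alone:
from typing import Protocol, TypeVar

_T = TypeVar("_T")
_R_co = TypeVar("_R_co", covariant=True)

class CanRound(Protocol[_R_co]):
    def __round__(self, /) -> _R_co: ...

def do_round(x: CanRound[_T]) -> _T:
    return x.__round__()

class Meters:
    def __round__(self, /) -> int:
        return 0

assert do_round(Meters()) == 0  # the type variable is solved as `int`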
+ +_Bool_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.bool]] +_UInt8_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.uint8]] +_Int16_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.int16]] +_LongLong_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.longlong]] +_Float32_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float32]] +_Float64_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float64]] +_LongDouble_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.longdouble]] +_Complex64_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex64]] +_Complex128_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex128]] +_CLongDouble_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.clongdouble]] + +b1_1d: _Bool_1d +u1_1d: _UInt8_1d +i2_1d: _Int16_1d +q_1d: _LongLong_1d +f4_1d: _Float32_1d +f8_1d: _Float64_1d +g_1d: _LongDouble_1d +c8_1d: _Complex64_1d +c16_1d: _Complex128_1d +G_1d: _CLongDouble_1d + +assert_type(do_abs(b1_1d), _Bool_1d) +assert_type(do_abs(u1_1d), _UInt8_1d) +assert_type(do_abs(i2_1d), _Int16_1d) +assert_type(do_abs(q_1d), _LongLong_1d) +assert_type(do_abs(f4_1d), _Float32_1d) +assert_type(do_abs(f8_1d), _Float64_1d) +assert_type(do_abs(g_1d), _LongDouble_1d) + +assert_type(do_abs(c8_1d), _Float32_1d) +# NOTE: Unfortunately it's not possible to have this return a `float64` sctype, see +# https://github.com/python/mypy/issues/14070 +assert_type(do_abs(c16_1d), np.ndarray[tuple[int], np.dtype[np.floating[_64Bit]]]) +assert_type(do_abs(G_1d), _LongDouble_1d) + +assert_type(do_invert(b1_1d), _Bool_1d) +assert_type(do_invert(u1_1d), _UInt8_1d) +assert_type(do_invert(i2_1d), _Int16_1d) +assert_type(do_invert(q_1d), _LongLong_1d) + +assert_type(do_neg(u1_1d), _UInt8_1d) +assert_type(do_neg(i2_1d), _Int16_1d) +assert_type(do_neg(q_1d), _LongLong_1d) +assert_type(do_neg(f4_1d), _Float32_1d) +assert_type(do_neg(c16_1d), _Complex128_1d) + +assert_type(do_pos(u1_1d), _UInt8_1d) +assert_type(do_pos(i2_1d), _Int16_1d) +assert_type(do_pos(q_1d), _LongLong_1d) +assert_type(do_pos(f4_1d), _Float32_1d) +assert_type(do_pos(c16_1d), _Complex128_1d) diff --git a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi index a5495b55b030..b6909e64f780 100644 --- a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi @@ -1,24 +1,37 @@ -import sys from typing import Any import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +from typing_extensions import assert_type -nd: npt.NDArray[np.int_] +b1_0d: np.ndarray[tuple[()], np.dtype[np.bool]] +u2_1d: np.ndarray[tuple[int], np.dtype[np.uint16]] +i4_2d: np.ndarray[tuple[int, int], np.dtype[np.int32]] +f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] +cG_4d: np.ndarray[tuple[int, int, int, int], np.dtype[np.clongdouble]] +i0_nd: npt.NDArray[np.int_] +uncertain_dtype: np.int32 | np.float64 | np.str_ # item -assert_type(nd.item(), int) -assert_type(nd.item(1), int) -assert_type(nd.item(0, 1), int) -assert_type(nd.item((0, 1)), int) +assert_type(i0_nd.item(), int) +assert_type(i0_nd.item(1), int) +assert_type(i0_nd.item(0, 1), int) +assert_type(i0_nd.item((0, 1)), int) + +assert_type(b1_0d.item(()), bool) +assert_type(u2_1d.item((0,)), int) +assert_type(i4_2d.item(-1, 2), int) +assert_type(f8_3d.item(2, 1, -1), float) +assert_type(cG_4d.item(-0xEd_fed_Deb_a_dead_bee), complex) # c'mon Ed, we talked about 
this... # tolist -assert_type(nd.tolist(), Any) +assert_type(b1_0d.tolist(), bool) +assert_type(u2_1d.tolist(), list[int]) +assert_type(i4_2d.tolist(), list[list[int]]) +assert_type(f8_3d.tolist(), list[list[list[float]]]) +assert_type(cG_4d.tolist(), complex | list[complex] | list[list[complex]] | list[list[list[Any]]]) +assert_type(i0_nd.tolist(), int | list[int] | list[list[int]] | list[list[list[Any]]]) # itemset does not return a value # tostring is pretty simple @@ -28,34 +41,41 @@ assert_type(nd.tolist(), Any) # dumps is pretty simple # astype -assert_type(nd.astype("float"), npt.NDArray[Any]) -assert_type(nd.astype(float), npt.NDArray[Any]) -assert_type(nd.astype(np.float64), npt.NDArray[np.float64]) -assert_type(nd.astype(np.float64, "K"), npt.NDArray[np.float64]) -assert_type(nd.astype(np.float64, "K", "unsafe"), npt.NDArray[np.float64]) -assert_type(nd.astype(np.float64, "K", "unsafe", True), npt.NDArray[np.float64]) -assert_type(nd.astype(np.float64, "K", "unsafe", True, True), npt.NDArray[np.float64]) +assert_type(i0_nd.astype("float"), npt.NDArray[Any]) +assert_type(i0_nd.astype(float), npt.NDArray[Any]) +assert_type(i0_nd.astype(np.float64), npt.NDArray[np.float64]) +assert_type(i0_nd.astype(np.float64, "K"), npt.NDArray[np.float64]) +assert_type(i0_nd.astype(np.float64, "K", "unsafe"), npt.NDArray[np.float64]) +assert_type(i0_nd.astype(np.float64, "K", "unsafe", True), npt.NDArray[np.float64]) +assert_type(i0_nd.astype(np.float64, "K", "unsafe", True, True), npt.NDArray[np.float64]) + +assert_type(np.astype(i0_nd, np.float64), npt.NDArray[np.float64]) -assert_type(np.astype(nd, np.float64), npt.NDArray[np.float64]) +assert_type(i4_2d.astype(np.uint16), np.ndarray[tuple[int, int], np.dtype[np.uint16]]) +assert_type(np.astype(i4_2d, np.uint16), np.ndarray[tuple[int, int], np.dtype[np.uint16]]) +assert_type(f8_3d.astype(np.int16), np.ndarray[tuple[int, int, int], np.dtype[np.int16]]) +assert_type(np.astype(f8_3d, np.int16), np.ndarray[tuple[int, int, int], np.dtype[np.int16]]) +assert_type(i4_2d.astype(uncertain_dtype), np.ndarray[tuple[int, int], np.dtype[np.generic[Any]]]) +assert_type(np.astype(i4_2d, uncertain_dtype), np.ndarray[tuple[int, int], np.dtype[Any]]) # byteswap -assert_type(nd.byteswap(), npt.NDArray[np.int_]) -assert_type(nd.byteswap(True), npt.NDArray[np.int_]) +assert_type(i0_nd.byteswap(), npt.NDArray[np.int_]) +assert_type(i0_nd.byteswap(True), npt.NDArray[np.int_]) # copy -assert_type(nd.copy(), npt.NDArray[np.int_]) -assert_type(nd.copy("C"), npt.NDArray[np.int_]) +assert_type(i0_nd.copy(), npt.NDArray[np.int_]) +assert_type(i0_nd.copy("C"), npt.NDArray[np.int_]) -assert_type(nd.view(), npt.NDArray[np.int_]) -assert_type(nd.view(np.float64), npt.NDArray[np.float64]) -assert_type(nd.view(float), npt.NDArray[Any]) -assert_type(nd.view(np.float64, np.matrix), np.matrix[Any, Any]) +assert_type(i0_nd.view(), npt.NDArray[np.int_]) +assert_type(i0_nd.view(np.float64), npt.NDArray[np.float64]) +assert_type(i0_nd.view(float), npt.NDArray[Any]) +assert_type(i0_nd.view(np.float64, np.matrix), np.matrix[tuple[int, int], Any]) # getfield -assert_type(nd.getfield("float"), npt.NDArray[Any]) -assert_type(nd.getfield(float), npt.NDArray[Any]) -assert_type(nd.getfield(np.float64), npt.NDArray[np.float64]) -assert_type(nd.getfield(np.float64, 8), npt.NDArray[np.float64]) +assert_type(i0_nd.getfield("float"), npt.NDArray[Any]) +assert_type(i0_nd.getfield(float), npt.NDArray[Any]) +assert_type(i0_nd.getfield(np.float64), npt.NDArray[np.float64]) 
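# Illustrative sketch of the shape-preserving astype() behaviour asserted
# above (an editorial aside, not part of the diff; assumes an installed NumPy):
import numpy as np

i4_2d = np.zeros((2, 3), dtype=np.int32)
u2_2d = i4_2d.astype(np.uint16)  # dtype changes, the 2-D shape does not
assert u2_2d.shape == (2, 3) and u2_2d.dtype == np.uint16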
+assert_type(i0_nd.getfield(np.float64, 8), npt.NDArray[np.float64]) # setflags does not return a value # fill does not return a value diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 783e18f5c632..7c619c1e156e 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -6,18 +6,15 @@ function-based counterpart in `../from_numeric.py`. """ -import sys import operator import ctypes as ct +from types import ModuleType from typing import Any, Literal import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +from typing_extensions import CapsuleType, assert_type class SubClass(npt.NDArray[np.object_]): ... @@ -34,8 +31,8 @@ AR_V: npt.NDArray[np.void] ctypes_obj = AR_f8.ctypes -assert_type(AR_f8.__dlpack__(), Any) -assert_type(AR_f8.__dlpack_device__(), tuple[int, Literal[0]]) +assert_type(AR_f8.__dlpack__(), CapsuleType) +assert_type(AR_f8.__dlpack_device__(), tuple[Literal[1], Literal[0]]) assert_type(ctypes_obj.data, int) assert_type(ctypes_obj.shape, ct.Array[np.ctypeslib.c_intp]) @@ -48,14 +45,14 @@ assert_type(ctypes_obj.strides_as(ct.c_ubyte), ct.Array[ct.c_ubyte]) assert_type(f8.all(), np.bool) assert_type(AR_f8.all(), np.bool) -assert_type(AR_f8.all(axis=0), Any) -assert_type(AR_f8.all(keepdims=True), Any) +assert_type(AR_f8.all(axis=0), np.bool | npt.NDArray[np.bool]) +assert_type(AR_f8.all(keepdims=True), np.bool | npt.NDArray[np.bool]) assert_type(AR_f8.all(out=B), SubClass) assert_type(f8.any(), np.bool) assert_type(AR_f8.any(), np.bool) -assert_type(AR_f8.any(axis=0), Any) -assert_type(AR_f8.any(keepdims=True), Any) +assert_type(AR_f8.any(axis=0), np.bool | npt.NDArray[np.bool]) +assert_type(AR_f8.any(keepdims=True), np.bool | npt.NDArray[np.bool]) assert_type(AR_f8.any(out=B), SubClass) assert_type(f8.argmax(), np.intp) @@ -176,14 +173,18 @@ assert_type(AR_f8.trace(out=B), SubClass) assert_type(AR_f8.item(), float) assert_type(AR_U.item(), str) -assert_type(AR_f8.ravel(), npt.NDArray[np.float64]) -assert_type(AR_U.ravel(), npt.NDArray[np.str_]) +assert_type(AR_f8.ravel(), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_U.ravel(), np.ndarray[tuple[int], np.dtype[np.str_]]) -assert_type(AR_f8.flatten(), npt.NDArray[np.float64]) -assert_type(AR_U.flatten(), npt.NDArray[np.str_]) +assert_type(AR_f8.flatten(), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_U.flatten(), np.ndarray[tuple[int], np.dtype[np.str_]]) -assert_type(AR_f8.reshape(1), npt.NDArray[np.float64]) -assert_type(AR_U.reshape(1), npt.NDArray[np.str_]) +assert_type(AR_i8.reshape(None), npt.NDArray[np.int64]) +assert_type(AR_f8.reshape(-1), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_c8.reshape(2, 3, 4, 5), np.ndarray[tuple[int, int, int, int], np.dtype[np.complex64]]) +assert_type(AR_m.reshape(()), np.ndarray[tuple[()], np.dtype[np.timedelta64]]) +assert_type(AR_U.reshape([]), np.ndarray[tuple[()], np.dtype[np.str_]]) +assert_type(AR_V.reshape((480, 720, 4)), np.ndarray[tuple[int, int, int], np.dtype[np.void]]) assert_type(int(AR_f8), int) assert_type(int(AR_U), int) @@ -229,5 +230,5 @@ assert_type(AR_u1.to_device("cpu"), npt.NDArray[np.uint8]) assert_type(AR_c8.to_device("cpu"), npt.NDArray[np.complex64]) assert_type(AR_m.to_device("cpu"), npt.NDArray[np.timedelta64]) -assert_type(f8.__array_namespace__(), Any) 
-assert_type(AR_f8.__array_namespace__(), Any) +assert_type(f8.__array_namespace__(), ModuleType) +assert_type(AR_f8.__array_namespace__(), ModuleType) diff --git a/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi index 9a41a90f1ee9..25637134088c 100644 --- a/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi @@ -1,24 +1,19 @@ -import sys -from typing import Any - import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +from typing_extensions import assert_type nd: npt.NDArray[np.int64] # reshape -assert_type(nd.reshape(), npt.NDArray[np.int64]) -assert_type(nd.reshape(4), npt.NDArray[np.int64]) -assert_type(nd.reshape(2, 2), npt.NDArray[np.int64]) -assert_type(nd.reshape((2, 2)), npt.NDArray[np.int64]) +assert_type(nd.reshape(None), npt.NDArray[np.int64]) +assert_type(nd.reshape(4), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(nd.reshape((4,)), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(nd.reshape(2, 2), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(nd.reshape((2, 2)), np.ndarray[tuple[int, int], np.dtype[np.int64]]) -assert_type(nd.reshape((2, 2), order="C"), npt.NDArray[np.int64]) -assert_type(nd.reshape(4, order="C"), npt.NDArray[np.int64]) +assert_type(nd.reshape((2, 2), order="C"), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(nd.reshape(4, order="C"), np.ndarray[tuple[int], np.dtype[np.int64]]) # resize does not return a value @@ -31,12 +26,12 @@ assert_type(nd.transpose((1, 0)), npt.NDArray[np.int64]) assert_type(nd.swapaxes(0, 1), npt.NDArray[np.int64]) # flatten -assert_type(nd.flatten(), npt.NDArray[np.int64]) -assert_type(nd.flatten("C"), npt.NDArray[np.int64]) +assert_type(nd.flatten(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(nd.flatten("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) # ravel -assert_type(nd.ravel(), npt.NDArray[np.int64]) -assert_type(nd.ravel("C"), npt.NDArray[np.int64]) +assert_type(nd.ravel(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(nd.ravel("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) # squeeze assert_type(nd.squeeze(), npt.NDArray[np.int64]) diff --git a/numpy/typing/tests/data/reveal/nditer.pyi b/numpy/typing/tests/data/reveal/nditer.pyi index 589453e777f2..b5723c41310e 100644 --- a/numpy/typing/tests/data/reveal/nditer.pyi +++ b/numpy/typing/tests/data/reveal/nditer.pyi @@ -1,13 +1,9 @@ -import sys from typing import Any import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +from typing_extensions import assert_type nditer_obj: np.nditer diff --git a/numpy/typing/tests/data/reveal/nested_sequence.pyi b/numpy/typing/tests/data/reveal/nested_sequence.pyi index 3ca23d6875e8..06acbbd9ce84 100644 --- a/numpy/typing/tests/data/reveal/nested_sequence.pyi +++ b/numpy/typing/tests/data/reveal/nested_sequence.pyi @@ -1,13 +1,9 @@ -import sys from collections.abc import Sequence from typing import Any from numpy._typing import _NestedSequence -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +from typing_extensions import assert_type a: Sequence[int] b: Sequence[Sequence[int]] diff --git a/numpy/typing/tests/data/reveal/npyio.pyi 
b/numpy/typing/tests/data/reveal/npyio.pyi index 1267b2811c68..d4c47b665ca5 100644 --- a/numpy/typing/tests/data/reveal/npyio.pyi +++ b/numpy/typing/tests/data/reveal/npyio.pyi @@ -1,5 +1,4 @@ import re -import sys import zipfile import pathlib from typing import IO, Any @@ -9,10 +8,7 @@ import numpy.typing as npt import numpy as np from numpy.lib._npyio_impl import BagObj -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +from typing_extensions import assert_type str_path: str pathlib_path: pathlib.Path diff --git a/numpy/typing/tests/data/reveal/numeric.pyi b/numpy/typing/tests/data/reveal/numeric.pyi index 1f0a8b36fff8..742ec2a4c827 100644 --- a/numpy/typing/tests/data/reveal/numeric.pyi +++ b/numpy/typing/tests/data/reveal/numeric.pyi @@ -5,16 +5,12 @@ Does not include tests which fall under ``array_constructors``. """ -import sys from typing import Any import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +from typing_extensions import assert_type class SubClass(npt.NDArray[np.int64]): ... diff --git a/numpy/typing/tests/data/reveal/numerictypes.pyi b/numpy/typing/tests/data/reveal/numerictypes.pyi index cf558ddc9718..a8ad4e0e1f4b 100644 --- a/numpy/typing/tests/data/reveal/numerictypes.pyi +++ b/numpy/typing/tests/data/reveal/numerictypes.pyi @@ -1,12 +1,8 @@ -import sys from typing import Literal +from typing_extensions import assert_type import numpy as np -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type assert_type( np.ScalarType, @@ -48,8 +44,10 @@ assert_type(np.ScalarType[0], type[int]) assert_type(np.ScalarType[3], type[bool]) assert_type(np.ScalarType[8], type[np.csingle]) assert_type(np.ScalarType[10], type[np.clongdouble]) -assert_type(np.bool_, type[np.bool]) +assert_type(np.bool_(object()), np.bool) assert_type(np.typecodes["Character"], Literal["c"]) assert_type(np.typecodes["Complex"], Literal["FDG"]) assert_type(np.typecodes["All"], Literal["?bhilqnpBHILQNPefdgFDGSUVOMm"]) + +assert_type(np.sctypeDict['uint8'], type[np.generic]) diff --git a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi index 60e92709a2e6..40c13e646f4a 100644 --- a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi @@ -1,5 +1,4 @@ from fractions import Fraction -import sys from collections.abc import Sequence from decimal import Decimal from typing import Any, Literal as L, TypeAlias, TypeVar @@ -8,10 +7,7 @@ import numpy as np import numpy.polynomial as npp import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import LiteralString, assert_type -else: - from typing_extensions import LiteralString, assert_type +from typing_extensions import assert_type, LiteralString _Ar_x: TypeAlias = npt.NDArray[np.inexact[Any] | np.object_] _Ar_f: TypeAlias = npt.NDArray[np.floating[Any]] diff --git a/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi b/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi index eecdb14e1c3c..ca5852808ce7 100644 --- a/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi @@ -1,4 +1,3 @@ -import sys from collections.abc import Sequence from decimal import Decimal from fractions import Fraction @@ -9,10 +8,7 @@ import 
numpy.typing as npt import numpy.polynomial.polyutils as pu from numpy.polynomial._polytypes import _Tuple2 -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +from typing_extensions import assert_type _ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating[Any]]] _ArrComplex1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating[Any, Any]]] @@ -163,9 +159,9 @@ assert_type(pu.mapparms(seq_num_object, seq_num_object), _Tuple2[object]) assert_type(pu.mapparms(seq_sct_int, seq_sct_int), _Tuple2[np.floating[Any]]) assert_type(pu.mapparms(seq_sct_int, seq_sct_float), _Tuple2[np.floating[Any]]) -assert_type(pu.mapparms(seq_sct_float, seq_sct_float), _Tuple2[np.floating[Any]]) -assert_type(pu.mapparms(seq_sct_float, seq_sct_complex), _Tuple2[np.complexfloating[Any, Any]]) -assert_type(pu.mapparms(seq_sct_complex, seq_sct_complex), _Tuple2[np.complexfloating[Any, Any]]) +assert_type(pu.mapparms(seq_sct_float, seq_sct_float), _Tuple2[float]) +assert_type(pu.mapparms(seq_sct_float, seq_sct_complex), _Tuple2[complex]) +assert_type(pu.mapparms(seq_sct_complex, seq_sct_complex), _Tuple2[complex]) assert_type(pu.mapparms(seq_sct_complex, seq_sct_object), _Tuple2[object]) assert_type(pu.mapparms(seq_sct_object, seq_sct_object), _Tuple2[object]) diff --git a/numpy/typing/tests/data/reveal/polynomial_series.pyi b/numpy/typing/tests/data/reveal/polynomial_series.pyi index a60d05afd01d..80ec9c0ff56a 100644 --- a/numpy/typing/tests/data/reveal/polynomial_series.pyi +++ b/numpy/typing/tests/data/reveal/polynomial_series.pyi @@ -1,15 +1,11 @@ from collections.abc import Sequence -import sys from typing import Any, TypeAlias import numpy as np import numpy.polynomial as npp import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +from typing_extensions import assert_type _ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating[Any]]] _ArrFloat1D64: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float64]] diff --git a/numpy/typing/tests/data/reveal/random.pyi b/numpy/typing/tests/data/reveal/random.pyi index b31b4b56f870..03b0712d8c77 100644 --- a/numpy/typing/tests/data/reveal/random.pyi +++ b/numpy/typing/tests/data/reveal/random.pyi @@ -1,4 +1,3 @@ -import sys import threading from typing import Any from collections.abc import Sequence @@ -12,10 +11,7 @@ from numpy.random._sfc64 import SFC64 from numpy.random._philox import Philox from numpy.random.bit_generator import SeedSequence, SeedlessSeedSequence -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +from typing_extensions import assert_type def_rng = np.random.default_rng() seed_seq = np.random.SeedSequence() @@ -1551,5 +1547,5 @@ assert_type(random_st.tomaxint(), int) assert_type(random_st.tomaxint(1), npt.NDArray[np.int64]) assert_type(random_st.tomaxint((1,)), npt.NDArray[np.int64]) -assert_type(np.random.set_bit_generator(pcg64), None) -assert_type(np.random.get_bit_generator(), np.random.BitGenerator) +assert_type(np.random.mtrand.set_bit_generator(pcg64), None) +assert_type(np.random.mtrand.get_bit_generator(), np.random.BitGenerator) diff --git a/numpy/typing/tests/data/reveal/rec.pyi b/numpy/typing/tests/data/reveal/rec.pyi index f2ae0891b485..13db0a969773 100644 --- a/numpy/typing/tests/data/reveal/rec.pyi +++ b/numpy/typing/tests/data/reveal/rec.pyi @@ -1,14 +1,10 @@ import io -import sys from 
typing import Any import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +from typing_extensions import assert_type AR_i8: npt.NDArray[np.int64] REC_AR_V: np.recarray[Any, np.dtype[np.record]] @@ -74,7 +70,7 @@ assert_type( ) assert_type( - np.rec.fromrecords((1, 1.5)), + np.rec.fromrecords((1, 1.5)), np.recarray[Any, np.dtype[np.record]] ) diff --git a/numpy/typing/tests/data/reveal/scalars.pyi b/numpy/typing/tests/data/reveal/scalars.pyi index 95775e9a8dbe..d3070437b740 100644 --- a/numpy/typing/tests/data/reveal/scalars.pyi +++ b/numpy/typing/tests/data/reveal/scalars.pyi @@ -1,13 +1,10 @@ -import sys -from typing import Any, Literal +from typing import Any, Literal, TypeAlias +from typing_extensions import Unpack, assert_type import numpy as np import numpy.typing as npt -if sys.version_info >= (3, 11): - from typing import assert_type -else: - from typing_extensions import assert_type +_1: TypeAlias = Literal[1] b: np.bool u8: np.uint64 @@ -19,6 +16,11 @@ m: np.timedelta64 U: np.str_ S: np.bytes_ V: np.void +O: np.object_ # cannot exist at runtime + +array_nd: np.ndarray[Any, Any] +array_0d: np.ndarray[tuple[()], Any] +array_2d_2x2: np.ndarray[tuple[Literal[2], Literal[2]], Any] assert_type(c8.real, np.float32) assert_type(c8.imag, np.float32) @@ -50,7 +52,7 @@ assert_type(V[["field1", "field2"]], np.void) V[0] = 5 # Aliases -assert_type(np.bool_(), np.bool) +assert_type(np.bool_(), np.bool[Literal[False]]) assert_type(np.byte(), np.byte) assert_type(np.short(), np.short) assert_type(np.intc(), np.intc) @@ -92,29 +94,38 @@ assert_type(c16.tolist(), complex) assert_type(U.tolist(), str) assert_type(S.tolist(), bytes) -assert_type(b.ravel(), npt.NDArray[np.bool]) -assert_type(i8.ravel(), npt.NDArray[np.int64]) -assert_type(u8.ravel(), npt.NDArray[np.uint64]) -assert_type(f8.ravel(), npt.NDArray[np.float64]) -assert_type(c16.ravel(), npt.NDArray[np.complex128]) -assert_type(U.ravel(), npt.NDArray[np.str_]) -assert_type(S.ravel(), npt.NDArray[np.bytes_]) - -assert_type(b.flatten(), npt.NDArray[np.bool]) -assert_type(i8.flatten(), npt.NDArray[np.int64]) -assert_type(u8.flatten(), npt.NDArray[np.uint64]) -assert_type(f8.flatten(), npt.NDArray[np.float64]) -assert_type(c16.flatten(), npt.NDArray[np.complex128]) -assert_type(U.flatten(), npt.NDArray[np.str_]) -assert_type(S.flatten(), npt.NDArray[np.bytes_]) - -assert_type(b.reshape(1), npt.NDArray[np.bool]) -assert_type(i8.reshape(1), npt.NDArray[np.int64]) -assert_type(u8.reshape(1), npt.NDArray[np.uint64]) -assert_type(f8.reshape(1), npt.NDArray[np.float64]) -assert_type(c16.reshape(1), npt.NDArray[np.complex128]) -assert_type(U.reshape(1), npt.NDArray[np.str_]) -assert_type(S.reshape(1), npt.NDArray[np.bytes_]) +assert_type(b.ravel(), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(i8.ravel(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(u8.ravel(), np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(f8.ravel(), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(c16.ravel(), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(U.ravel(), np.ndarray[tuple[int], np.dtype[np.str_]]) +assert_type(S.ravel(), np.ndarray[tuple[int], np.dtype[np.bytes_]]) + +assert_type(b.flatten(), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(i8.flatten(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(u8.flatten(), np.ndarray[tuple[int], np.dtype[np.uint64]]) +assert_type(f8.flatten(), 
np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(c16.flatten(), np.ndarray[tuple[int], np.dtype[np.complex128]]) +assert_type(U.flatten(), np.ndarray[tuple[int], np.dtype[np.str_]]) +assert_type(S.flatten(), np.ndarray[tuple[int], np.dtype[np.bytes_]]) + +assert_type(b.reshape(()), np.bool) +assert_type(i8.reshape([]), np.int64) +assert_type(b.reshape(1), np.ndarray[tuple[_1], np.dtype[np.bool]]) +assert_type(i8.reshape(-1), np.ndarray[tuple[_1], np.dtype[np.int64]]) +assert_type(u8.reshape(1, 1), np.ndarray[tuple[_1, _1], np.dtype[np.uint64]]) +assert_type(f8.reshape(1, -1), np.ndarray[tuple[_1, _1], np.dtype[np.float64]]) +assert_type(c16.reshape(1, 1, 1), np.ndarray[tuple[_1, _1, _1], np.dtype[np.complex128]]) +assert_type(U.reshape(1, 1, 1, 1), np.ndarray[tuple[_1, _1, _1, _1], np.dtype[np.str_]]) +assert_type( + S.reshape(1, 1, 1, 1, 1), + np.ndarray[ + # len(shape) >= 5 + tuple[_1, _1, _1, _1, _1, Unpack[tuple[_1, ...]]], + np.dtype[np.bytes_], + ], +) assert_type(i8.astype(float), Any) assert_type(i8.astype(np.float64), np.float64) @@ -156,3 +167,27 @@ assert_type(f8.__ceil__(), int) assert_type(f8.__floor__(), int) assert_type(i8.is_integer(), Literal[True]) + +assert_type(O.real, np.object_) +assert_type(O.imag, np.object_) +assert_type(int(O), int) +assert_type(float(O), float) +assert_type(complex(O), complex) + +# These fail because of a mypy __new__ bug: +# https://github.com/python/mypy/issues/15182 +# According to the typing spec, the following statements are valid, see +# https://typing.readthedocs.io/en/latest/spec/constructors.html#new-method + +# assert_type(np.object_(), None) +# assert_type(np.object_(None), None) +# assert_type(np.object_(array_nd), np.ndarray[Any, np.dtype[np.object_]]) +# assert_type(np.object_([]), npt.NDArray[np.object_]) +# assert_type(np.object_(()), npt.NDArray[np.object_]) +# assert_type(np.object_(range(4)), npt.NDArray[np.object_]) +# assert_type(np.object_(+42), int) +# assert_type(np.object_(1 / 137), float) +# assert_type(np.object_('Developers! 
' * (1 << 6)), str)
+# assert_type(np.object_(object()), object)
+# assert_type(np.object_({False, True, NotADirectoryError}), set[Any])
+# assert_type(np.object_({'spam': 'food', 'ham': 'food'}), dict[str, str])
diff --git a/numpy/typing/tests/data/reveal/shape_base.pyi b/numpy/typing/tests/data/reveal/shape_base.pyi
index 526f3abf161c..a4b4bba3f9fc 100644
--- a/numpy/typing/tests/data/reveal/shape_base.pyi
+++ b/numpy/typing/tests/data/reveal/shape_base.pyi
@@ -1,13 +1,9 @@
-import sys
 from typing import Any

 import numpy as np
 import numpy.typing as npt

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 i8: np.int64
 f8: np.float64
diff --git a/numpy/typing/tests/data/reveal/stride_tricks.pyi b/numpy/typing/tests/data/reveal/stride_tricks.pyi
index 893e1bc314bc..2ce666280f64 100644
--- a/numpy/typing/tests/data/reveal/stride_tricks.pyi
+++ b/numpy/typing/tests/data/reveal/stride_tricks.pyi
@@ -1,13 +1,9 @@
-import sys
 from typing import Any

 import numpy as np
 import numpy.typing as npt

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 AR_f8: npt.NDArray[np.float64]
 AR_LIKE_f: list[float]
diff --git a/numpy/typing/tests/data/reveal/strings.pyi b/numpy/typing/tests/data/reveal/strings.pyi
index 500a250b055a..649902f0c6d3 100644
--- a/numpy/typing/tests/data/reveal/strings.pyi
+++ b/numpy/typing/tests/data/reveal/strings.pyi
@@ -1,137 +1,193 @@
-import sys
-
 import numpy as np
 import numpy.typing as npt
+import numpy._typing as np_t

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type
+from typing import TypeAlias

 AR_U: npt.NDArray[np.str_]
 AR_S: npt.NDArray[np.bytes_]
+AR_T: np.ndarray[np_t._Shape, np.dtypes.StringDType]
+
+AR_T_alias: TypeAlias = np.ndarray[np_t._Shape, np.dtypes.StringDType]
+AR_TU_alias: TypeAlias = AR_T_alias | npt.NDArray[np.str_]

 assert_type(np.strings.equal(AR_U, AR_U), npt.NDArray[np.bool])
 assert_type(np.strings.equal(AR_S, AR_S), npt.NDArray[np.bool])
+assert_type(np.strings.equal(AR_T, AR_T), npt.NDArray[np.bool])

 assert_type(np.strings.not_equal(AR_U, AR_U), npt.NDArray[np.bool])
 assert_type(np.strings.not_equal(AR_S, AR_S), npt.NDArray[np.bool])
+assert_type(np.strings.not_equal(AR_T, AR_T), npt.NDArray[np.bool])

 assert_type(np.strings.greater_equal(AR_U, AR_U), npt.NDArray[np.bool])
 assert_type(np.strings.greater_equal(AR_S, AR_S), npt.NDArray[np.bool])
+assert_type(np.strings.greater_equal(AR_T, AR_T), npt.NDArray[np.bool])

 assert_type(np.strings.less_equal(AR_U, AR_U), npt.NDArray[np.bool])
 assert_type(np.strings.less_equal(AR_S, AR_S), npt.NDArray[np.bool])
+assert_type(np.strings.less_equal(AR_T, AR_T), npt.NDArray[np.bool])

 assert_type(np.strings.greater(AR_U, AR_U), npt.NDArray[np.bool])
 assert_type(np.strings.greater(AR_S, AR_S), npt.NDArray[np.bool])
+assert_type(np.strings.greater(AR_T, AR_T), npt.NDArray[np.bool])

 assert_type(np.strings.less(AR_U, AR_U), npt.NDArray[np.bool])
 assert_type(np.strings.less(AR_S, AR_S), npt.NDArray[np.bool])
+assert_type(np.strings.less(AR_T, AR_T), npt.NDArray[np.bool])
+
+assert_type(np.strings.add(AR_U, AR_U), npt.NDArray[np.str_])
+assert_type(np.strings.add(AR_S, AR_S), npt.NDArray[np.bytes_])
+assert_type(np.strings.add(AR_T, AR_T), AR_T_alias)

 assert_type(np.strings.multiply(AR_U, 5), npt.NDArray[np.str_])
 assert_type(np.strings.multiply(AR_S, [5, 4, 3]), npt.NDArray[np.bytes_])
+assert_type(np.strings.multiply(AR_T, 5), AR_T_alias)

 assert_type(np.strings.mod(AR_U, "test"), npt.NDArray[np.str_])
 assert_type(np.strings.mod(AR_S, "test"), npt.NDArray[np.bytes_])
+assert_type(np.strings.mod(AR_T, "test"), AR_T_alias)

 assert_type(np.strings.capitalize(AR_U), npt.NDArray[np.str_])
 assert_type(np.strings.capitalize(AR_S), npt.NDArray[np.bytes_])
+assert_type(np.strings.capitalize(AR_T), AR_T_alias)

 assert_type(np.strings.center(AR_U, 5), npt.NDArray[np.str_])
 assert_type(np.strings.center(AR_S, [2, 3, 4], b"a"), npt.NDArray[np.bytes_])
+assert_type(np.strings.center(AR_T, 5), AR_T_alias)

 assert_type(np.strings.encode(AR_U), npt.NDArray[np.bytes_])
+assert_type(np.strings.encode(AR_T), npt.NDArray[np.bytes_])
 assert_type(np.strings.decode(AR_S), npt.NDArray[np.str_])

 assert_type(np.strings.expandtabs(AR_U), npt.NDArray[np.str_])
 assert_type(np.strings.expandtabs(AR_S, tabsize=4), npt.NDArray[np.bytes_])
-
-assert_type(np.strings.join(AR_U, "_"), npt.NDArray[np.str_])
-assert_type(np.strings.join(AR_S, [b"_", b""]), npt.NDArray[np.bytes_])
+assert_type(np.strings.expandtabs(AR_T), AR_T_alias)

 assert_type(np.strings.ljust(AR_U, 5), npt.NDArray[np.str_])
 assert_type(np.strings.ljust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_])
+assert_type(np.strings.ljust(AR_T, 5), AR_T_alias)
+assert_type(np.strings.ljust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_TU_alias)
+
 assert_type(np.strings.rjust(AR_U, 5), npt.NDArray[np.str_])
 assert_type(np.strings.rjust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_])
+assert_type(np.strings.rjust(AR_T, 5), AR_T_alias)
+assert_type(np.strings.rjust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_TU_alias)

 assert_type(np.strings.lstrip(AR_U), npt.NDArray[np.str_])
 assert_type(np.strings.lstrip(AR_S, b"_"), npt.NDArray[np.bytes_])
+assert_type(np.strings.lstrip(AR_T), AR_T_alias)
+assert_type(np.strings.lstrip(AR_T, "_"), AR_TU_alias)
+
 assert_type(np.strings.rstrip(AR_U), npt.NDArray[np.str_])
 assert_type(np.strings.rstrip(AR_S, b"_"), npt.NDArray[np.bytes_])
+assert_type(np.strings.rstrip(AR_T), AR_T_alias)
+assert_type(np.strings.rstrip(AR_T, "_"), AR_TU_alias)
+
 assert_type(np.strings.strip(AR_U), npt.NDArray[np.str_])
 assert_type(np.strings.strip(AR_S, b"_"), npt.NDArray[np.bytes_])
+assert_type(np.strings.strip(AR_T), AR_T_alias)
+assert_type(np.strings.strip(AR_T, "_"), AR_TU_alias)

 assert_type(np.strings.count(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_])
 assert_type(np.strings.count(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_])
+assert_type(np.strings.count(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_])
+assert_type(np.strings.count(AR_T, ["a", "b", "c"], end=9), npt.NDArray[np.int_])

 assert_type(np.strings.partition(AR_U, "\n"), npt.NDArray[np.str_])
 assert_type(np.strings.partition(AR_S, [b"a", b"b", b"c"]), npt.NDArray[np.bytes_])
+assert_type(np.strings.partition(AR_T, "\n"), AR_TU_alias)
+
 assert_type(np.strings.rpartition(AR_U, "\n"), npt.NDArray[np.str_])
 assert_type(np.strings.rpartition(AR_S, [b"a", b"b", b"c"]), npt.NDArray[np.bytes_])
+assert_type(np.strings.rpartition(AR_T, "\n"), AR_TU_alias)

 assert_type(np.strings.replace(AR_U, "_", "-"), npt.NDArray[np.str_])
 assert_type(np.strings.replace(AR_S, [b"_", b""], [b"a", b"b"]), npt.NDArray[np.bytes_])
+assert_type(np.strings.replace(AR_T, "_", "_"), AR_TU_alias)

-assert_type(np.strings.split(AR_U, "_"), npt.NDArray[np.object_])
-assert_type(np.strings.split(AR_S, maxsplit=[1, 2, 3]), npt.NDArray[np.object_])
-assert_type(np.strings.rsplit(AR_U, "_"), npt.NDArray[np.object_])
-assert_type(np.strings.rsplit(AR_S, maxsplit=[1, 2, 3]), npt.NDArray[np.object_])
+assert_type(np.strings.lower(AR_U), npt.NDArray[np.str_])
+assert_type(np.strings.lower(AR_S), npt.NDArray[np.bytes_])
+assert_type(np.strings.lower(AR_T), AR_T_alias)

-assert_type(np.strings.splitlines(AR_U), npt.NDArray[np.object_])
-assert_type(np.strings.splitlines(AR_S, keepends=[True, True, False]), npt.NDArray[np.object_])
+assert_type(np.strings.upper(AR_U), npt.NDArray[np.str_])
+assert_type(np.strings.upper(AR_S), npt.NDArray[np.bytes_])
+assert_type(np.strings.upper(AR_T), AR_T_alias)

 assert_type(np.strings.swapcase(AR_U), npt.NDArray[np.str_])
 assert_type(np.strings.swapcase(AR_S), npt.NDArray[np.bytes_])
+assert_type(np.strings.swapcase(AR_T), AR_T_alias)

 assert_type(np.strings.title(AR_U), npt.NDArray[np.str_])
 assert_type(np.strings.title(AR_S), npt.NDArray[np.bytes_])
-
-assert_type(np.strings.upper(AR_U), npt.NDArray[np.str_])
-assert_type(np.strings.upper(AR_S), npt.NDArray[np.bytes_])
+assert_type(np.strings.title(AR_T), AR_T_alias)

 assert_type(np.strings.zfill(AR_U, 5), npt.NDArray[np.str_])
 assert_type(np.strings.zfill(AR_S, [2, 3, 4]), npt.NDArray[np.bytes_])
+assert_type(np.strings.zfill(AR_T, 5), AR_T_alias)

 assert_type(np.strings.endswith(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.bool])
 assert_type(np.strings.endswith(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.bool])
+assert_type(np.strings.endswith(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.bool])
+
 assert_type(np.strings.startswith(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.bool])
 assert_type(np.strings.startswith(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.bool])
+assert_type(np.strings.startswith(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.bool])

 assert_type(np.strings.find(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_])
 assert_type(np.strings.find(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_])
+assert_type(np.strings.find(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_])
+
 assert_type(np.strings.rfind(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_])
 assert_type(np.strings.rfind(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_])
+assert_type(np.strings.rfind(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_])

 assert_type(np.strings.index(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_])
 assert_type(np.strings.index(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_])
+assert_type(np.strings.index(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_])
+
 assert_type(np.strings.rindex(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_])
 assert_type(np.strings.rindex(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_])
+assert_type(np.strings.rindex(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_])

 assert_type(np.strings.isalpha(AR_U), npt.NDArray[np.bool])
 assert_type(np.strings.isalpha(AR_S), npt.NDArray[np.bool])
+assert_type(np.strings.isalpha(AR_T), npt.NDArray[np.bool])

 assert_type(np.strings.isalnum(AR_U), npt.NDArray[np.bool])
 assert_type(np.strings.isalnum(AR_S), npt.NDArray[np.bool])
+assert_type(np.strings.isalnum(AR_T), npt.NDArray[np.bool])

 assert_type(np.strings.isdecimal(AR_U), npt.NDArray[np.bool])
+assert_type(np.strings.isdecimal(AR_T), npt.NDArray[np.bool])

 assert_type(np.strings.isdigit(AR_U), npt.NDArray[np.bool])
 assert_type(np.strings.isdigit(AR_S), npt.NDArray[np.bool])
+assert_type(np.strings.isdigit(AR_T), npt.NDArray[np.bool])

 assert_type(np.strings.islower(AR_U), npt.NDArray[np.bool])
 assert_type(np.strings.islower(AR_S), npt.NDArray[np.bool])
+assert_type(np.strings.islower(AR_T), npt.NDArray[np.bool])

 assert_type(np.strings.isnumeric(AR_U), npt.NDArray[np.bool])
+assert_type(np.strings.isnumeric(AR_T), npt.NDArray[np.bool])

 assert_type(np.strings.isspace(AR_U), npt.NDArray[np.bool])
 assert_type(np.strings.isspace(AR_S), npt.NDArray[np.bool])
+assert_type(np.strings.isspace(AR_T), npt.NDArray[np.bool])

 assert_type(np.strings.istitle(AR_U), npt.NDArray[np.bool])
 assert_type(np.strings.istitle(AR_S), npt.NDArray[np.bool])
+assert_type(np.strings.istitle(AR_T), npt.NDArray[np.bool])

 assert_type(np.strings.isupper(AR_U), npt.NDArray[np.bool])
 assert_type(np.strings.isupper(AR_S), npt.NDArray[np.bool])
+assert_type(np.strings.isupper(AR_T), npt.NDArray[np.bool])

 assert_type(np.strings.str_len(AR_U), npt.NDArray[np.int_])
 assert_type(np.strings.str_len(AR_S), npt.NDArray[np.int_])
+assert_type(np.strings.str_len(AR_T), npt.NDArray[np.int_])
+
+assert_type(np.strings.translate(AR_U, ""), npt.NDArray[np.str_])
+assert_type(np.strings.translate(AR_S, ""), npt.NDArray[np.bytes_])
+assert_type(np.strings.translate(AR_T, ""), AR_T_alias)
diff --git a/numpy/typing/tests/data/reveal/testing.pyi b/numpy/typing/tests/data/reveal/testing.pyi
index 2a0d83493f6e..5301090a5f4b 100644
--- a/numpy/typing/tests/data/reveal/testing.pyi
+++ b/numpy/typing/tests/data/reveal/testing.pyi
@@ -11,10 +11,7 @@ from pathlib import Path
 import numpy as np
 import numpy.typing as npt

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 AR_f8: npt.NDArray[np.float64]
 AR_i8: npt.NDArray[np.int64]
diff --git a/numpy/typing/tests/data/reveal/twodim_base.pyi b/numpy/typing/tests/data/reveal/twodim_base.pyi
index 9d808dbb1e0d..2f1cd56d1e7b 100644
--- a/numpy/typing/tests/data/reveal/twodim_base.pyi
+++ b/numpy/typing/tests/data/reveal/twodim_base.pyi
@@ -1,13 +1,9 @@
-import sys
 from typing import Any, TypeVar

 import numpy as np
 import numpy.typing as npt

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 _SCT = TypeVar("_SCT", bound=np.generic)

@@ -28,6 +24,7 @@ AR_c: npt.NDArray[np.complex128]
 AR_O: npt.NDArray[np.object_]

 AR_LIKE_b: list[bool]
+AR_LIKE_c: list[complex]

 assert_type(np.fliplr(AR_b), npt.NDArray[np.bool])
 assert_type(np.fliplr(AR_LIKE_b), npt.NDArray[Any])
@@ -62,28 +59,84 @@ assert_type(np.vander(AR_f, increasing=True), npt.NDArray[np.floating[Any]])
 assert_type(np.vander(AR_c), npt.NDArray[np.complexfloating[Any, Any]])
 assert_type(np.vander(AR_O), npt.NDArray[np.object_])

+assert_type(
+    np.histogram2d(AR_LIKE_c, AR_LIKE_c),
+    tuple[
+        npt.NDArray[np.float64],
+        npt.NDArray[np.complex128 | np.float64],
+        npt.NDArray[np.complex128 | np.float64],
+    ],
+)
 assert_type(
     np.histogram2d(AR_i, AR_b),
     tuple[
         npt.NDArray[np.float64],
-        npt.NDArray[np.floating[Any]],
-        npt.NDArray[np.floating[Any]],
+        npt.NDArray[np.float64],
+        npt.NDArray[np.float64],
     ],
 )
 assert_type(
-    np.histogram2d(AR_f, AR_f),
+    np.histogram2d(AR_f, AR_i),
     tuple[
         npt.NDArray[np.float64],
-        npt.NDArray[np.floating[Any]],
-        npt.NDArray[np.floating[Any]],
+        npt.NDArray[np.float64],
+        npt.NDArray[np.float64],
+    ],
+)
+assert_type(
+    np.histogram2d(AR_i, AR_f),
+    tuple[
+        npt.NDArray[np.float64],
+        npt.NDArray[np.float64],
+        npt.NDArray[np.float64],
     ],
 )
 assert_type(
     np.histogram2d(AR_f, AR_c, weights=AR_LIKE_b),
     tuple[
         npt.NDArray[np.float64],
-        npt.NDArray[np.complexfloating[Any, Any]],
-        npt.NDArray[np.complexfloating[Any, Any]],
+        npt.NDArray[np.complex128],
+        npt.NDArray[np.complex128],
+    ],
+)
+assert_type(
+    np.histogram2d(AR_f, AR_c, bins=8),
+    tuple[
+        npt.NDArray[np.float64],
+        npt.NDArray[np.complex128],
+        npt.NDArray[np.complex128],
+    ],
+)
+assert_type(
+    np.histogram2d(AR_c, AR_f, bins=(8, 5)),
+    tuple[
+        npt.NDArray[np.float64],
+        npt.NDArray[np.complex128],
+        npt.NDArray[np.complex128],
+    ],
+)
+assert_type(
+    np.histogram2d(AR_c, AR_i, bins=AR_u),
+    tuple[
+        npt.NDArray[np.float64],
+        npt.NDArray[np.uint64],
+        npt.NDArray[np.uint64],
+    ],
+)
+assert_type(
+    np.histogram2d(AR_c, AR_c, bins=(AR_u, AR_u)),
+    tuple[
+        npt.NDArray[np.float64],
+        npt.NDArray[np.uint64],
+        npt.NDArray[np.uint64],
+    ],
+)
+assert_type(
+    np.histogram2d(AR_c, AR_c, bins=(AR_b, 8)),
+    tuple[
+        npt.NDArray[np.float64],
+        npt.NDArray[np.bool | np.complex128],
+        npt.NDArray[np.bool | np.complex128],
     ],
 )
diff --git a/numpy/typing/tests/data/reveal/type_check.pyi b/numpy/typing/tests/data/reveal/type_check.pyi
index 6d357278762b..4a7ef36e9e26 100644
--- a/numpy/typing/tests/data/reveal/type_check.pyi
+++ b/numpy/typing/tests/data/reveal/type_check.pyi
@@ -1,14 +1,10 @@
-import sys
 from typing import Any, Literal

 import numpy as np
 import numpy.typing as npt
 from numpy._typing import _16Bit, _32Bit, _64Bit, _128Bit

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 f8: np.float64
 f: float
@@ -24,20 +20,18 @@ AR_c16: npt.NDArray[np.complex128]

 AR_LIKE_f: list[float]

-class RealObj:
+class ComplexObj:
     real: slice
-
-class ImagObj:
     imag: slice

 assert_type(np.mintypecode(["f8"], typeset="qfQF"), str)

-assert_type(np.real(RealObj()), slice)
+assert_type(np.real(ComplexObj()), slice)
 assert_type(np.real(AR_f8), npt.NDArray[np.float64])
 assert_type(np.real(AR_c16), npt.NDArray[np.float64])
 assert_type(np.real(AR_LIKE_f), npt.NDArray[Any])

-assert_type(np.imag(ImagObj()), slice)
+assert_type(np.imag(ComplexObj()), slice)
 assert_type(np.imag(AR_f8), npt.NDArray[np.float64])
 assert_type(np.imag(AR_c16), npt.NDArray[np.float64])
 assert_type(np.imag(AR_LIKE_f), npt.NDArray[Any])
@@ -59,7 +53,10 @@ assert_type(np.nan_to_num(AR_f8, nan=1.5), npt.NDArray[np.float64])
 assert_type(np.nan_to_num(AR_LIKE_f, posinf=9999), npt.NDArray[Any])

 assert_type(np.real_if_close(AR_f8), npt.NDArray[np.float64])
-assert_type(np.real_if_close(AR_c16), npt.NDArray[np.float64] | npt.NDArray[np.complex128])
+assert_type(
+    np.real_if_close(AR_c16),
+    npt.NDArray[np.floating[_64Bit]] | npt.NDArray[np.complexfloating[_64Bit, _64Bit]],
+)
 assert_type(np.real_if_close(AR_c8), npt.NDArray[np.float32] | npt.NDArray[np.complex64])
 assert_type(np.real_if_close(AR_LIKE_f), npt.NDArray[Any])

@@ -68,7 +65,7 @@ assert_type(np.typename("B"), Literal["unsigned char"])
 assert_type(np.typename("V"), Literal["void"])
 assert_type(np.typename("S1"), Literal["character"])

-assert_type(np.common_type(AR_i4), type[np.float64])
+assert_type(np.common_type(AR_i4), type[np.floating[_64Bit]])
 assert_type(np.common_type(AR_f2), type[np.float16])
 assert_type(np.common_type(AR_f2, AR_i4), type[np.floating[_16Bit | _64Bit]])
 assert_type(np.common_type(AR_f16, AR_i4), type[np.floating[_64Bit | _128Bit]])
diff --git a/numpy/typing/tests/data/reveal/ufunc_config.pyi b/numpy/typing/tests/data/reveal/ufunc_config.pyi
index 9d74abf42322..b98157d1d451 100644
--- a/numpy/typing/tests/data/reveal/ufunc_config.pyi
+++ b/numpy/typing/tests/data/reveal/ufunc_config.pyi
@@ -1,24 +1,18 @@
 """Typing tests for `_core._ufunc_config`."""

-import sys
-from typing import Any, Protocol
+from _typeshed import SupportsWrite
+from typing import Any
 from collections.abc import Callable

 import numpy as np

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 def func(a: str, b: int) -> None: ...

 class Write:
     def write(self, value: str) -> None: ...

-class SupportsWrite(Protocol):
-    def write(self, s: str, /) -> object: ...
-
 assert_type(np.seterr(all=None), np._core._ufunc_config._ErrDict)
 assert_type(np.seterr(divide="ignore"), np._core._ufunc_config._ErrDict)
 assert_type(np.seterr(over="warn"), np._core._ufunc_config._ErrDict)
@@ -29,9 +23,9 @@ assert_type(np.geterr(), np._core._ufunc_config._ErrDict)
 assert_type(np.setbufsize(4096), int)
 assert_type(np.getbufsize(), int)

-assert_type(np.seterrcall(func), Callable[[str, int], Any] | None | SupportsWrite)
-assert_type(np.seterrcall(Write()), Callable[[str, int], Any] | None | SupportsWrite)
-assert_type(np.geterrcall(), Callable[[str, int], Any] | None | SupportsWrite)
+assert_type(np.seterrcall(func), Callable[[str, int], Any] | None | SupportsWrite[str])
+assert_type(np.seterrcall(Write()), Callable[[str, int], Any] | None | SupportsWrite[str])
+assert_type(np.geterrcall(), Callable[[str, int], Any] | None | SupportsWrite[str])

 assert_type(np.errstate(call=func, all="call"), np.errstate)
 assert_type(np.errstate(call=Write(), divide="log", over="log"), np.errstate)
diff --git a/numpy/typing/tests/data/reveal/ufunclike.pyi b/numpy/typing/tests/data/reveal/ufunclike.pyi
index e29e76ed14e4..2a0c6c65ea5d 100644
--- a/numpy/typing/tests/data/reveal/ufunclike.pyi
+++ b/numpy/typing/tests/data/reveal/ufunclike.pyi
@@ -1,13 +1,9 @@
-import sys
 from typing import Any

 import numpy as np
 import numpy.typing as npt

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 AR_LIKE_b: list[bool]
 AR_LIKE_u: list[np.uint32]
diff --git a/numpy/typing/tests/data/reveal/ufuncs.pyi b/numpy/typing/tests/data/reveal/ufuncs.pyi
index 39a796bf6845..8d3527ac8415 100644
--- a/numpy/typing/tests/data/reveal/ufuncs.pyi
+++ b/numpy/typing/tests/data/reveal/ufuncs.pyi
@@ -1,13 +1,9 @@
-import sys
 from typing import Literal, Any, NoReturn

 import numpy as np
 import numpy.typing as npt

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 i8: np.int64
 f8: np.float64
@@ -18,6 +14,7 @@ assert_type(np.absolute.__doc__, str)
 assert_type(np.absolute.types, list[str])

 assert_type(np.absolute.__name__, Literal["absolute"])
+assert_type(np.absolute.__qualname__, Literal["absolute"])
 assert_type(np.absolute.ntypes, Literal[20])
 assert_type(np.absolute.identity, None)
 assert_type(np.absolute.nin, Literal[1])
@@ -30,6 +27,7 @@ assert_type(np.absolute(AR_f8), npt.NDArray[Any])
 assert_type(np.absolute.at(AR_f8, AR_i8), None)

 assert_type(np.add.__name__, Literal["add"])
+assert_type(np.add.__qualname__, Literal["add"])
 assert_type(np.add.ntypes, Literal[22])
 assert_type(np.add.identity, Literal[0])
 assert_type(np.add.nin, Literal[2])
@@ -46,6 +44,7 @@ assert_type(np.add.outer(f8, f8), Any)
 assert_type(np.add.outer(AR_f8, f8), npt.NDArray[Any])

 assert_type(np.frexp.__name__, Literal["frexp"])
+assert_type(np.frexp.__qualname__, Literal["frexp"])
 assert_type(np.frexp.ntypes, Literal[4])
 assert_type(np.frexp.identity, None)
 assert_type(np.frexp.nin, Literal[1])
@@ -56,6 +55,7 @@ assert_type(np.frexp(f8), tuple[Any, Any])
 assert_type(np.frexp(AR_f8), tuple[npt.NDArray[Any], npt.NDArray[Any]])

 assert_type(np.divmod.__name__, Literal["divmod"])
+assert_type(np.divmod.__qualname__, Literal["divmod"])
 assert_type(np.divmod.ntypes, Literal[15])
 assert_type(np.divmod.identity, None)
 assert_type(np.divmod.nin, Literal[2])
@@ -66,6 +66,7 @@ assert_type(np.divmod(f8, f8), tuple[Any, Any])
 assert_type(np.divmod(AR_f8, f8), tuple[npt.NDArray[Any], npt.NDArray[Any]])

 assert_type(np.matmul.__name__, Literal["matmul"])
+assert_type(np.matmul.__qualname__, Literal["matmul"])
 assert_type(np.matmul.ntypes, Literal[19])
 assert_type(np.matmul.identity, None)
 assert_type(np.matmul.nin, Literal[2])
@@ -77,6 +78,7 @@ assert_type(np.matmul(AR_f8, AR_f8), Any)
 assert_type(np.matmul(AR_f8, AR_f8, axes=[(0, 1), (0, 1), (0, 1)]), Any)

 assert_type(np.vecdot.__name__, Literal["vecdot"])
+assert_type(np.vecdot.__qualname__, Literal["vecdot"])
 assert_type(np.vecdot.ntypes, Literal[19])
 assert_type(np.vecdot.identity, None)
 assert_type(np.vecdot.nin, Literal[2])
@@ -86,7 +88,8 @@ assert_type(np.vecdot.signature, Literal["(n),(n)->()"])
 assert_type(np.vecdot.identity, None)
 assert_type(np.vecdot(AR_f8, AR_f8), Any)

-assert_type(np.bitwise_count.__name__, Literal['bitwise_count'])
+assert_type(np.bitwise_count.__name__, Literal["bitwise_count"])
+assert_type(np.bitwise_count.__qualname__, Literal["bitwise_count"])
 assert_type(np.bitwise_count.ntypes, Literal[11])
 assert_type(np.bitwise_count.identity, None)
 assert_type(np.bitwise_count.nin, Literal[1])
diff --git a/numpy/typing/tests/data/reveal/warnings_and_errors.pyi b/numpy/typing/tests/data/reveal/warnings_and_errors.pyi
index e498fee0d3cc..9b1e23dfb081 100644
--- a/numpy/typing/tests/data/reveal/warnings_and_errors.pyi
+++ b/numpy/typing/tests/data/reveal/warnings_and_errors.pyi
@@ -1,11 +1,6 @@
-import sys
-
 import numpy.exceptions as ex

-if sys.version_info >= (3, 11):
-    from typing import assert_type
-else:
-    from typing_extensions import assert_type
+from typing_extensions import assert_type

 assert_type(ex.ModuleDeprecationWarning(), ex.ModuleDeprecationWarning)
 assert_type(ex.VisibleDeprecationWarning(), ex.VisibleDeprecationWarning)
diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py
index dc65a51a2027..86d6f0d4df26 100644
--- a/numpy/typing/tests/test_typing.py
+++ b/numpy/typing/tests/test_typing.py
@@ -5,7 +5,6 @@
 import re
 import shutil
 from collections import defaultdict
-from collections.abc import Iterator
 from typing import TYPE_CHECKING

 import pytest
@@ -34,6 +33,7 @@
 NO_MYPY = False

 if TYPE_CHECKING:
+    from collections.abc import Iterator
     # We need this as annotation, but it's located in a private namespace.
     # As a compromise, do *not* import it during runtime
     from _pytest.mark.structures import ParameterSet
@@ -168,9 +168,9 @@ def test_fail(path: str) -> None:
         target_line = lines[lineno - 1]
         if "# E:" in target_line:
             expression, _, marker = target_line.partition("  # E: ")
-            expected_error = errors[lineno].strip()
-            marker = marker.strip()
-            _test_fail(path, expression, marker, expected_error, lineno)
+            error = errors[lineno].strip()
+            expected_error = marker.strip()
+            _test_fail(path, expression, error, expected_error, lineno)
         else:
             pytest.fail(
                 f"Unexpected mypy output at line {lineno}\n\n{errors[lineno]}"
@@ -200,7 +200,7 @@ def _test_fail(
 ) -> None:
     if expected_error is None:
         raise AssertionError(_FAIL_MSG1.format(lineno, expression, error))
-    elif error not in expected_error:
+    elif expected_error not in error:
         raise AssertionError(_FAIL_MSG2.format(
             lineno, expression, expected_error, error
         ))
diff --git a/numpy/version.pyi b/numpy/version.pyi
index 1262189f2f38..52ca38df1918 100644
--- a/numpy/version.pyi
+++ b/numpy/version.pyi
@@ -1,10 +1,6 @@
-import sys
-from typing import Final, TypeAlias
+from typing import Final

-if sys.version_info >= (3, 11):
-    from typing import LiteralString
-else:
-    LiteralString: TypeAlias = str
+from typing_extensions import LiteralString

 __all__ = (
     '__version__',
diff --git a/pavement.py b/pavement.py
index 43dc28675eb9..6b6a0668b7a1 100644
--- a/pavement.py
+++ b/pavement.py
@@ -23,8 +23,6 @@ the same underlying python for egg install in venv and for bdist_mpkg
 """
 import os
-import sys
-import shutil
 import hashlib
 import textwrap

@@ -38,7 +36,7 @@

 #-----------------------------------
 # Path to the release notes
-RELEASE_NOTES = 'doc/source/release/2.1.0-notes.rst'
+RELEASE_NOTES = 'doc/source/release/2.2.3-notes.rst'

 #-------------------------------------------------------
diff --git a/pyproject.toml b/pyproject.toml
index ad4673949a10..b4f39af4d56c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -7,7 +7,7 @@ requires = [

 [project]
 name = "numpy"
-version = "2.1.0.dev0"
+version = "2.2.3"
 # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88)
 license = {file = "LICENSE.txt"}
@@ -29,6 +29,7 @@ classifiers = [
     'Programming Language :: Python :: 3.10',
     'Programming Language :: Python :: 3.11',
     'Programming Language :: Python :: 3.12',
+    'Programming Language :: Python :: 3.13',
     'Programming Language :: Python :: 3 :: Only',
     'Programming Language :: Python :: Implementation :: CPython',
     'Topic :: Software Development',
@@ -149,7 +150,10 @@ test-command = "bash {project}/tools/wheels/cibw_test_command.sh {project}"
 [tool.cibuildwheel.linux]
 manylinux-x86_64-image = "manylinux2014"
 manylinux-aarch64-image = "manylinux2014"
-musllinux-x86_64-image = "musllinux_1_1"
+musllinux-x86_64-image = "musllinux_1_2"
+
+[tool.cibuildwheel.pyodide]
+config-settings = "build-dir=build setup-args=--cross-file=$PWD/tools/ci/emscripten/emscripten.meson.cross setup-args=-Dblas=none setup-args=-Dlapack=none"

 [tool.cibuildwheel.linux.environment]
 # RUNNER_OS is a GitHub Actions specific env var; define it here so it works on Cirrus CI too
@@ -180,6 +184,13 @@ select = "*-win32"
 config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=true build-dir=build"
 repair-wheel-command = ""

+[[tool.cibuildwheel.overrides]]
+select = "*pyodide*"
+before-test = "pip install -r {project}/requirements/emscripten_test_requirements.txt"
+# Pyodide ensures that the wheels are already repaired by auditwheel-emscripten
+repair-wheel-command = ""
+test-command = "python -m pytest --pyargs numpy -m 'not slow'"
+
 [tool.meson-python]
 meson = 'vendored-meson/meson/meson.py'
diff --git a/requirements/build_requirements.txt b/requirements/build_requirements.txt
index 701867b64465..a51143a780e7 100644
--- a/requirements/build_requirements.txt
+++ b/requirements/build_requirements.txt
@@ -1,5 +1,5 @@
 meson-python>=0.13.1
 Cython>=3.0.6
 ninja
-spin==0.8
+spin==0.13
 build
diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt
index e134b0dae82e..437dbc90a9b7 100644
--- a/requirements/ci32_requirements.txt
+++ b/requirements/ci32_requirements.txt
@@ -1,3 +1,3 @@
-spin
+spin==0.13
 # Keep this in sync with ci_requirements.txt
-scipy-openblas32==0.3.27.44.3
+scipy-openblas32==0.3.28.0.2
diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt
index f688bfb6eb3a..ab255e648527 100644
--- a/requirements/ci_requirements.txt
+++ b/requirements/ci_requirements.txt
@@ -1,4 +1,4 @@
-spin
+spin==0.13
 # Keep this in sync with ci32_requirements.txt
-scipy-openblas32==0.3.27.44.3
-scipy-openblas64==0.3.27.44.3
+scipy-openblas32==0.3.28.0.2
+scipy-openblas64==0.3.28.0.2
diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt
index 79de7a9f0802..4dcf2a788df0 100644
--- a/requirements/doc_requirements.txt
+++ b/requirements/doc_requirements.txt
@@ -16,3 +16,6 @@ pickleshare
 # needed to build release notes
 towncrier
 toml
+
+# for doctests, also needs pytz which is in test_requirements
+scipy-doctest==1.5.1
diff --git a/requirements/linter_requirements.txt b/requirements/linter_requirements.txt
index 2e0298baed52..c003901cc023 100644
--- a/requirements/linter_requirements.txt
+++ b/requirements/linter_requirements.txt
@@ -1,2 +1,2 @@
-pycodestyle==2.8.0
+pycodestyle==2.12.1
 GitPython>=3.1.30
diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt
index ec7827b7e50e..93e441f61310 100644
--- a/requirements/test_requirements.txt
+++ b/requirements/test_requirements.txt
@@ -1,8 +1,7 @@
 Cython
 wheel==0.38.1
-#setuptools==65.5.1 ; python_version < '3.12'
-#setuptools ; python_version >= '3.12'
-setuptools
+setuptools==65.5.1 ; python_version < '3.12'
+setuptools ; python_version >= '3.12'
 hypothesis==6.104.1
 pytest==7.4.0
 pytz==2023.3.post1
@@ -10,12 +9,13 @@ pytest-cov==4.1.0
 meson
 ninja; sys_platform != "emscripten"
 pytest-xdist
+pytest-timeout
 # for numpy.random.test.test_extending
 cffi; python_version < '3.10'
 # For testing types. Notes on the restrictions:
 # - Mypy relies on C API features not present in PyPy
 # NOTE: Keep mypy in sync with environment.yml
-mypy==1.10.0; platform_python_implementation != "PyPy"
+mypy==1.14.1; platform_python_implementation != "PyPy"
 typing_extensions>=4.2.0
 # for optional f2py encoding detection
 charset-normalizer
diff --git a/tools/c_coverage/c_coverage_report.py b/tools/c_coverage/c_coverage_report.py
index 2e5a4c270376..1825cbf8a822 100755
--- a/tools/c_coverage/c_coverage_report.py
+++ b/tools/c_coverage/c_coverage_report.py
@@ -12,7 +12,7 @@
 try:
     import pygments
     if tuple([int(x) for x in pygments.__version__.split('.')]) < (0, 11):
-        raise ImportError()
+        raise ImportError
     from pygments import highlight
     from pygments.lexers import CLexer
     from pygments.formatters import HtmlFormatter
@@ -122,7 +122,7 @@ def collect_stats(files, fd, pattern):
     current_file = None
     current_function = None

-    for i, line in enumerate(fd):
+    for line in fd:
         if re.match("f[lie]=.+", line):
             path = line.split('=', 2)[1].strip()
             if os.path.exists(path) and re.search(pattern, path):
diff --git a/tools/changelog.py b/tools/changelog.py
index 7b7e66ddb511..4498bb93bd9a 100755
--- a/tools/changelog.py
+++ b/tools/changelog.py
@@ -34,7 +34,6 @@
 """
 import os
-import sys
 import re
 from git import Repo
 from github import Github
@@ -75,7 +74,7 @@ def get_authors(revision_range):

     # Append '+' to new authors.
     authors_new = [s + ' +' for s in authors_cur - authors_pre]
-    authors_old = [s for s in authors_cur & authors_pre]
+    authors_old = list(authors_cur & authors_pre)
     authors = authors_new + authors_old
     authors.sort()
     return authors
@@ -133,14 +132,47 @@ def main(token, revision_range):
     print("="*len(heading))
     print(pull_request_msg % len(pull_requests))

+    def backtick_repl(matchobj):
+        """repl to add an escaped space following a code block if needed"""
+        if matchobj.group(2) != ' ':
+            post = r'\ ' + matchobj.group(2)
+        else:
+            post = matchobj.group(2)
+        return '``' + matchobj.group(1) + '``' + post
+
     for pull in pull_requests:
+        # sanitize whitespace
         title = re.sub(r"\s+", " ", pull.title.strip())
+
+        # substitute any single backtick not adjacent to a backtick
+        # for a double backtick
+        title = re.sub(
+            "(?P<pre>(?:^|(?<=[^`])))`(?P<post>(?=[^`]|$))",
+            r"\g<pre>``\g<post>",
+            title
+        )
+        # add an escaped space if code block is not followed by a space
+        title = re.sub("``(.*?)``(.)", backtick_repl, title)
+
+        # sanitize asterisks
+        title = title.replace('*', '\\*')
+
         if len(title) > 60:
             remainder = re.sub(r"\s.*$", "...", title[60:])
             if len(remainder) > 20:
-                remainder = title[:80] + "..."
+                # just use the first 80 characters, with ellipses.
+                # note: this was previously bugged,
+                # assigning to `remainder` rather than `title`
+                title = title[:80] + "..."
             else:
+                # use the first 60 characters and the next word
                 title = title[:60] + remainder
+
+            if title.count('`') % 4 != 0:
+                # the ellipsis has cut into the middle of a code block,
+                # so close the code block before the ellipsis
+                title = title[:-3] + '``...'
+
         print(pull_msg.format(pull.number, pull.html_url, title))
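The backtick handling above is easier to verify on a concrete title. A minimal, self-contained check of the single-to-double-backtick substitution (the sample title is illustrative only):

    import re

    def escape_single_backticks(title):
        # a lone `x` becomes ``x`` so it renders as a reST literal;
        # backticks that are already doubled are left untouched
        return re.sub(
            "(?P<pre>(?:^|(?<=[^`])))`(?P<post>(?=[^`]|$))",
            r"\g<pre>``\g<post>",
            title,
        )

    print(escape_single_backticks("BUG: fix `np.add` overflow"))
    # BUG: fix ``np.add`` overflow

Both named groups are zero-width, so the replacement effectively rewrites each qualifying backtick in place.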
 
 
diff --git a/tools/check_installed_files.py b/tools/check_installed_files.py
index c45a046b1ca2..cd207ca776e8 100644
--- a/tools/check_installed_files.py
+++ b/tools/check_installed_files.py
@@ -117,7 +117,7 @@ def get_files(dir_to_check, kind='test'):
 
     for key in targets.keys():
         for values in list(targets[key].values()):
-            if not values['tag'] in all_tags:
+            if values['tag'] not in all_tags:
                 all_tags.add(values['tag'])
 
     if all_tags != set(['runtime', 'python-runtime', 'devel', 'tests']):
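Since `set.add` is idempotent, the `not in` guard in the loop above is redundant; assuming `all_tags` starts out as an empty set (as in the surrounding function), the nested loop collapses to a set comprehension:

    all_tags = {
        values['tag']
        for target in targets.values()
        for values in target.values()
    }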
diff --git a/tools/ci/cirrus_wheels.yml b/tools/ci/cirrus_wheels.yml
index f63274e5af3f..aa1063d9f81d 100644
--- a/tools/ci/cirrus_wheels.yml
+++ b/tools/ci/cirrus_wheels.yml
@@ -65,7 +65,7 @@ macosx_arm64_task:
 
   matrix:
     - env:
-        CIBW_BUILD: cp310-* cp311
+        CIBW_BUILD: cp310-* cp311-*
     - env:
         CIBW_BUILD: cp312-* cp313-*
     - env:
@@ -78,7 +78,7 @@ macosx_arm64_task:
 
   build_script: |
     brew install micromamba gfortran
-    micromamba shell init -s bash -p ~/micromamba
+    micromamba shell init -s bash --root-prefix ~/micromamba
     source ~/.bash_profile
     
     micromamba create -n numpydev
@@ -124,7 +124,7 @@ wheels_upload_task:
 
   env:
     NUMPY_STAGING_UPLOAD_TOKEN: ENCRYPTED[!5a69522ae0c2af9edb2bc1cdfeaca6292fb3666d9ecd82dca0615921834a6ce3b702352835d8bde4ea2a9ed5ef8424ac!]
-    NUMPY_NIGHTLY_UPLOAD_TOKEN: ENCRYPTED[ef04347663cfcb58d121385707e55951dc8e03b009edeed988aa4a33ba8205c54ca9980ac4da88e1adfdebff8b9d7ed4]
+    NUMPY_NIGHTLY_UPLOAD_TOKEN: ENCRYPTED[4376691390321cd5e76613ec21de8456cc0af0164971dd9542f985a017dc30ccb4d40e60f59184618e2d55afd63e93b7]
 
   upload_script: |
     apt-get update
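The `CIBW_BUILD` correction above matters because cibuildwheel matches each selector against the full build identifier (e.g. `cp311-macosx_arm64`), so a bare `cp311` selects nothing. A rough illustration with `fnmatch`, which is a simplification of cibuildwheel's actual selector logic:

    from fnmatch import fnmatch

    build_id = "cp311-macosx_arm64"
    print(fnmatch(build_id, "cp311"))    # False: the pattern must cover the whole ID
    print(fnmatch(build_id, "cp311-*"))  # True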
diff --git a/tools/download-wheels.py b/tools/download-wheels.py
index e5753eb2148c..54dbdf1200a8 100644
--- a/tools/download-wheels.py
+++ b/tools/download-wheels.py
@@ -56,15 +56,20 @@ def get_wheel_names(version):
         The release version. For instance, "1.18.3".
 
     """
+    ret = []
     http = urllib3.PoolManager(cert_reqs="CERT_REQUIRED")
     tmpl = re.compile(rf"^.*{PREFIX}-{version}{SUFFIX}")
-    index_url = f"{STAGING_URL}/files"
-    index_html = http.request("GET", index_url)
-    soup = BeautifulSoup(index_html.data, "html.parser")
-    return soup.find_all(string=tmpl)
+    # TODO: generalize this by searching for `showing 1 of N` and
+    # looping over N pages, starting from 1
+    for i in range(1, 3):
+        index_url = f"{STAGING_URL}/files?page={i}"
+        index_html = http.request("GET", index_url)
+        soup = BeautifulSoup(index_html.data, "html.parser")
+        ret += soup.find_all(string=tmpl)
+    return ret
 
 
-def download_wheels(version, wheelhouse):
+def download_wheels(version, wheelhouse, test=False):
     """Download release wheels.
 
     The release wheels for the given NumPy version are downloaded
@@ -86,8 +91,15 @@ def download_wheels(version, wheelhouse):
         wheel_path = os.path.join(wheelhouse, wheel_name)
         with open(wheel_path, "wb") as f:
             with http.request("GET", wheel_url, preload_content=False,) as r:
-                print(f"{i + 1:<4}{wheel_name}")
-                shutil.copyfileobj(r, f)
+                info = r.info()
+                length = int(info.get('Content-Length', '0'))
+                if length == 0:
+                    length = 'unknown size'
+                else:
+                    length = f"{(length / 1024 / 1024):.2f}MB"
+                print(f"{i + 1:<4}{wheel_name} {length}")
+                if not test:
+                    shutil.copyfileobj(r, f)
     print(f"\nTotal files downloaded: {len(wheel_names)}")
 
 
@@ -101,6 +113,10 @@ def download_wheels(version, wheelhouse):
         default=os.path.join(os.getcwd(), "release", "installers"),
         help="Directory in which to store downloaded wheels\n"
              "[defaults to /release/installers]")
+    parser.add_argument(
+        "-t", "--test",
+        action='store_true',
+        help="only list available wheels, do not download")
 
     args = parser.parse_args()
 
@@ -110,4 +126,4 @@ def download_wheels(version, wheelhouse):
             f"{wheelhouse} wheelhouse directory is not present."
             " Perhaps you need to use the '-w' flag to specify one.")
 
-    download_wheels(args.version, wheelhouse)
+    download_wheels(args.version, wheelhouse, test=args.test)
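The `range(1, 3)` above hard-codes two staging pages. One way to discharge the TODO in `get_wheel_names` is to page until a request yields no more matches; a sketch under that assumption (untested against the anaconda.org index, whose layout may differ):

    import itertools
    from bs4 import BeautifulSoup

    def get_wheel_names_all_pages(http, tmpl, staging_url):
        """Collect wheel names across however many index pages exist."""
        names = []
        for page in itertools.count(1):
            index_html = http.request("GET", f"{staging_url}/files?page={page}")
            soup = BeautifulSoup(index_html.data, "html.parser")
            found = soup.find_all(string=tmpl)
            if not found:  # assume pages past the last one match nothing
                break
            names += found
        return names

With the new flag, an invocation along the lines of `python tools/download-wheels.py 2.2.3 -t` prints the matched wheels and their sizes without writing anything to the wheelhouse.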
diff --git a/tools/lint_diff.ini b/tools/lint_diff.ini
index dbebe483b4ab..810e265d4dec 100644
--- a/tools/lint_diff.ini
+++ b/tools/lint_diff.ini
@@ -1,5 +1,5 @@
 [pycodestyle]
-max_line_length = 79
+max_line_length = 88
 statistics = True
 ignore = E121,E122,E123,E125,E126,E127,E128,E226,E241,E251,E265,E266,E302,E402,E704,E712,E721,E731,E741,W291,W293,W391,W503,W504
 exclude = numpy/__config__.py,numpy/typing/tests/data,.spin/cmds.py
diff --git a/tools/refguide_check.py b/tools/refguide_check.py
index 8de816715bdb..f3e548dedda2 100644
--- a/tools/refguide_check.py
+++ b/tools/refguide_check.py
@@ -31,18 +31,13 @@
 import io
 import os
 import re
-import shutil
 import sys
-import tempfile
 import warnings
 import docutils.core
 from argparse import ArgumentParser
-from contextlib import contextmanager, redirect_stderr
 
 from docutils.parsers.rst import directives
 
-import sphinx
-import numpy as np
 
 sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))
 from numpydoc.docscrape_sphinx import get_doc_object
@@ -186,7 +181,7 @@ def find_names(module, names_dict):
             res = re.match(pattern, line)
             if res is not None:
                 name = res.group(1)
-                entry = '.'.join([module_name, name])
+                entry = f'{module_name}.{name}'
                 names_dict.setdefault(module_name, set()).add(name)
                 break
 
diff --git a/tools/swig/numpy.i b/tools/swig/numpy.i
index c8c26cbcd3d6..747446648c8b 100644
--- a/tools/swig/numpy.i
+++ b/tools/swig/numpy.i
@@ -1989,7 +1989,7 @@ void free_cap(PyObject * cap)
 %typemap(argout)
   (DATA_TYPE ARGOUT_ARRAY1[ANY])
 {
-  $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum);
+  $result = SWIG_AppendOutput($result,(PyObject*)array$argnum);
 }
 
 /* Typemap suite for (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1)
@@ -2018,7 +2018,7 @@ void free_cap(PyObject * cap)
 %typemap(argout)
   (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1)
 {
-  $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum);
+  $result = SWIG_AppendOutput($result,(PyObject*)array$argnum);
 }
 
 /* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1)
@@ -2047,7 +2047,7 @@ void free_cap(PyObject * cap)
 %typemap(argout)
   (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1)
 {
-  $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum);
+  $result = SWIG_AppendOutput($result,(PyObject*)array$argnum);
 }
 
 /* Typemap suite for (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY])
@@ -2065,7 +2065,7 @@ void free_cap(PyObject * cap)
 %typemap(argout)
   (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY])
 {
-  $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum);
+  $result = SWIG_AppendOutput($result,(PyObject*)array$argnum);
 }
 
 /* Typemap suite for (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY])
@@ -2083,7 +2083,7 @@ void free_cap(PyObject * cap)
 %typemap(argout)
   (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY])
 {
-  $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum);
+  $result = SWIG_AppendOutput($result,(PyObject*)array$argnum);
 }
 
 /* Typemap suite for (DATA_TYPE ARGOUT_ARRAY4[ANY][ANY][ANY][ANY])
@@ -2101,7 +2101,7 @@ void free_cap(PyObject * cap)
 %typemap(argout)
   (DATA_TYPE ARGOUT_ARRAY4[ANY][ANY][ANY][ANY])
 {
-  $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum);
+  $result = SWIG_AppendOutput($result,(PyObject*)array$argnum);
 }
 
 /*****************************/
@@ -2126,7 +2126,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1)
@@ -2147,7 +2147,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
@@ -2169,7 +2169,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2)
@@ -2191,7 +2191,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
@@ -2213,7 +2213,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array || !require_fortran(array)) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2)
@@ -2235,7 +2235,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array || !require_fortran(array)) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
@@ -2259,7 +2259,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3,
@@ -2283,7 +2283,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
@@ -2307,7 +2307,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array || !require_fortran(array)) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3,
@@ -2331,7 +2331,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array || !require_fortran(array)) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
@@ -2356,7 +2356,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4,
@@ -2381,7 +2381,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
@@ -2406,7 +2406,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array || !require_fortran(array)) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4,
@@ -2431,7 +2431,7 @@ void free_cap(PyObject * cap)
   PyArrayObject* array = (PyArrayObject*) obj;
 
   if (!array || !require_fortran(array)) SWIG_fail;
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /*************************************/
@@ -2465,7 +2465,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEWM_ARRAY1)
@@ -2495,7 +2495,7 @@ PyObject* cap = PyCapsule_New((void*)(*$2), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
@@ -2526,7 +2526,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEWM_ARRAY2)
@@ -2557,7 +2557,7 @@ PyObject* cap = PyCapsule_New((void*)(*$3), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2)
@@ -2588,7 +2588,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEWM_FARRAY2)
@@ -2619,7 +2619,7 @@ PyObject* cap = PyCapsule_New((void*)(*$3), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
@@ -2652,7 +2652,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3,
@@ -2685,7 +2685,7 @@ PyObject* cap = PyCapsule_New((void*)(*$4), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
@@ -2718,7 +2718,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3,
@@ -2751,7 +2751,7 @@ PyObject* cap = PyCapsule_New((void*)(*$4), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
@@ -2785,7 +2785,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4,
@@ -2819,7 +2819,7 @@ PyObject* cap = PyCapsule_New((void*)(*$5), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2,
@@ -2853,7 +2853,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4,
@@ -2887,7 +2887,7 @@ PyObject* cap = PyCapsule_New((void*)(*$5), SWIGPY_CAPSULE_NAME, free_cap);
   PyArray_SetBaseObject(array,cap);
 %#endif
 
-  $result = SWIG_Python_AppendOutput($result,obj);
+  $result = SWIG_AppendOutput($result,obj);
 }
 
 /**************************************/
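The blanket `SWIG_Python_AppendOutput` -> `SWIG_AppendOutput` rename appears to track SWIG 4.3, which renamed this helper (and changed the old entry point's signature). The Python-visible behavior of the typemaps is unchanged: an ARGOUT array is dropped from the argument list and appended to the return value. For a hypothetical module `example` wrapping `void arange_double(double* ARGOUT_ARRAY1, int DIM1)`:

    import numpy as np
    import example  # hypothetical extension built with numpy.i

    # The C function takes (double* out, int n); the argout typemap
    # turns the output pointer into the return value, so Python only
    # passes the length and receives the filled array.
    out = example.arange_double(5)
    assert isinstance(out, np.ndarray) and out.shape == (5,)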
diff --git a/tools/swig/test/testArray.py b/tools/swig/test/testArray.py
index 49011bb13304..d6a963d2ad90 100755
--- a/tools/swig/test/testArray.py
+++ b/tools/swig/test/testArray.py
@@ -39,7 +39,8 @@ def testConstructor2(self):
 
     def testConstructor3(self):
         "Test Array1 copy constructor"
-        for i in range(self.array1.length()): self.array1[i] = i
+        for i in range(self.array1.length()):
+            self.array1[i] = i
         arrayCopy = Array.Array1(self.array1)
         self.assertTrue(arrayCopy == self.array1)
 
@@ -97,17 +98,20 @@ def testGetBad2(self):
 
     def testAsString(self):
         "Test Array1 asString method"
-        for i in range(self.array1.length()): self.array1[i] = i+1
+        for i in range(self.array1.length()):
+            self.array1[i] = i+1
         self.assertTrue(self.array1.asString() == "[ 1, 2, 3, 4, 5 ]")
 
     def testStr(self):
         "Test Array1 __str__ method"
-        for i in range(self.array1.length()): self.array1[i] = i-2
+        for i in range(self.array1.length()):
+            self.array1[i] = i-2
         self.assertTrue(str(self.array1) == "[ -2, -1, 0, 1, 2 ]")
 
     def testView(self):
         "Test Array1 view method"
-        for i in range(self.array1.length()): self.array1[i] = i+1
+        for i in range(self.array1.length()):
+            self.array1[i] = i+1
         a = self.array1.view()
         self.assertTrue(isinstance(a, np.ndarray))
         self.assertTrue(len(a) == self.length)
@@ -289,7 +293,8 @@ def testConstructor2(self):
 
     def testConstructor3(self):
         "Test ArrayZ copy constructor"
-        for i in range(self.array3.length()): self.array3[i] = complex(i,-i)
+        for i in range(self.array3.length()):
+            self.array3[i] = complex(i, -i)
         arrayCopy = Array.ArrayZ(self.array3)
         self.assertTrue(arrayCopy == self.array3)
 
@@ -347,17 +352,20 @@ def testGetBad2(self):
 
     def testAsString(self):
         "Test ArrayZ asString method"
-        for i in range(self.array3.length()): self.array3[i] = complex(i+1,-i-1)
+        for i in range(self.array3.length()):
+            self.array3[i] = complex(i+1, -i-1)
         self.assertTrue(self.array3.asString() == "[ (1,-1), (2,-2), (3,-3), (4,-4), (5,-5) ]")
 
     def testStr(self):
         "Test ArrayZ __str__ method"
-        for i in range(self.array3.length()): self.array3[i] = complex(i-2,(i-2)*2)
+        for i in range(self.array3.length()):
+            self.array3[i] = complex(i-2, (i-2)*2)
         self.assertTrue(str(self.array3) == "[ (-2,-4), (-1,-2), (0,0), (1,2), (2,4) ]")
 
     def testView(self):
         "Test ArrayZ view method"
-        for i in range(self.array3.length()): self.array3[i] = complex(i+1,i+2)
+        for i in range(self.array3.length()):
+            self.array3[i] = complex(i+1, i+2)
         a = self.array3.view()
         self.assertTrue(isinstance(a, np.ndarray))
         self.assertTrue(len(a) == self.length)
diff --git a/tools/swig/test/testFarray.py b/tools/swig/test/testFarray.py
index 29bf96fe2f68..c5beed92e4a1 100755
--- a/tools/swig/test/testFarray.py
+++ b/tools/swig/test/testFarray.py
@@ -8,8 +8,10 @@
 # Import NumPy
 import numpy as np
 major, minor = [ int(d) for d in np.__version__.split(".")[:2] ]
-if major == 0: BadListError = TypeError
-else:          BadListError = ValueError
+if major == 0:
+    BadListError = TypeError
+else:
+    BadListError = ValueError
 
 # Add the distutils-generated build directory to the python search path and then
 # import the extension module
diff --git a/tools/swig/test/testFlat.py b/tools/swig/test/testFlat.py
index e3e456a56415..75f9183a39d9 100755
--- a/tools/swig/test/testFlat.py
+++ b/tools/swig/test/testFlat.py
@@ -8,8 +8,10 @@
 # Import NumPy
 import numpy as np
 major, minor = [ int(d) for d in np.__version__.split(".")[:2] ]
-if major == 0: BadListError = TypeError
-else:          BadListError = ValueError
+if major == 0:
+    BadListError = TypeError
+else:
+    BadListError = ValueError
 
 import Flat
 
diff --git a/tools/swig/test/testFortran.py b/tools/swig/test/testFortran.py
index 348355afcba8..bd03e1fc526a 100644
--- a/tools/swig/test/testFortran.py
+++ b/tools/swig/test/testFortran.py
@@ -6,8 +6,10 @@
 # Import NumPy
 import numpy as np
 major, minor = [ int(d) for d in np.__version__.split(".")[:2] ]
-if major == 0: BadListError = TypeError
-else:          BadListError = ValueError
+if major == 0:
+    BadListError = TypeError
+else:
+    BadListError = ValueError
 
 import Fortran
 
diff --git a/tools/swig/test/testMatrix.py b/tools/swig/test/testMatrix.py
index 814c0d578039..d218ca21cc22 100755
--- a/tools/swig/test/testMatrix.py
+++ b/tools/swig/test/testMatrix.py
@@ -6,8 +6,10 @@
 # Import NumPy
 import numpy as np
 major, minor = [ int(d) for d in np.__version__.split(".")[:2] ]
-if major == 0: BadListError = TypeError
-else:          BadListError = ValueError
+if major == 0:
+    BadListError = TypeError
+else:
+    BadListError = ValueError
 
 import Matrix
 
diff --git a/tools/swig/test/testSuperTensor.py b/tools/swig/test/testSuperTensor.py
index 121c4a405805..0bb9b081a4da 100644
--- a/tools/swig/test/testSuperTensor.py
+++ b/tools/swig/test/testSuperTensor.py
@@ -6,8 +6,10 @@
 # Import NumPy
 import numpy as np
 major, minor = [ int(d) for d in np.__version__.split(".")[:2] ]
-if major == 0: BadListError = TypeError
-else:          BadListError = ValueError
+if major == 0:
+    BadListError = TypeError
+else:
+    BadListError = ValueError
 
 import SuperTensor
 
diff --git a/tools/swig/test/testTensor.py b/tools/swig/test/testTensor.py
index 164ceb2d5626..f9399487c077 100755
--- a/tools/swig/test/testTensor.py
+++ b/tools/swig/test/testTensor.py
@@ -7,8 +7,10 @@
 # Import NumPy
 import numpy as np
 major, minor = [ int(d) for d in np.__version__.split(".")[:2] ]
-if major == 0: BadListError = TypeError
-else:          BadListError = ValueError
+if major == 0:
+    BadListError = TypeError
+else:
+    BadListError = ValueError
 
 import Tensor
 
diff --git a/tools/swig/test/testVector.py b/tools/swig/test/testVector.py
index 1a663d1db83b..edb771966541 100755
--- a/tools/swig/test/testVector.py
+++ b/tools/swig/test/testVector.py
@@ -6,8 +6,10 @@
 # Import NumPy
 import numpy as np
 major, minor = [ int(d) for d in np.__version__.split(".")[:2] ]
-if major == 0: BadListError = TypeError
-else:          BadListError = ValueError
+if major == 0:
+    BadListError = TypeError
+else:
+    BadListError = ValueError
 
 import Vector
 
diff --git a/tools/wheels/LICENSE_linux.txt b/tools/wheels/LICENSE_linux.txt
index a5b5ae5c22e6..021b4b0289e7 100644
--- a/tools/wheels/LICENSE_linux.txt
+++ b/tools/wheels/LICENSE_linux.txt
@@ -5,7 +5,7 @@ This binary distribution of NumPy also bundles the following software:
 
 
 Name: OpenBLAS
-Files: numpy.libs/libopenblas*.so
+Files: numpy.libs/libscipy_openblas*.so
 Description: bundled as a dynamically linked library
 Availability: https://github.com/OpenMathLib/OpenBLAS/
 License: BSD-3-Clause
@@ -41,7 +41,7 @@ License: BSD-3-Clause
 
 
 Name: LAPACK
-Files: numpy.libs/libopenblas*.so
+Files: numpy.libs/libscipy_openblas*.so
 Description: bundled in OpenBLAS
 Availability: https://github.com/OpenMathLib/OpenBLAS/
 License: BSD-3-Clause-Attribution
diff --git a/tools/wheels/LICENSE_osx.txt b/tools/wheels/LICENSE_osx.txt
index 1ebd5663d02c..81889131cfa7 100644
--- a/tools/wheels/LICENSE_osx.txt
+++ b/tools/wheels/LICENSE_osx.txt
@@ -4,7 +4,7 @@
 This binary distribution of NumPy also bundles the following software:
 
 Name: OpenBLAS
-Files: numpy/.dylibs/libopenblas*.so
+Files: numpy/.dylibs/libscipy_openblas*.so
 Description: bundled as a dynamically linked library
 Availability: https://github.com/OpenMathLib/OpenBLAS/
 License: BSD-3-Clause
@@ -40,7 +40,7 @@ License: BSD-3-Clause
 
 
 Name: LAPACK
-Files: numpy/.dylibs/libopenblas*.so
+Files: numpy/.dylibs/libscipy_openblas*.so
 Description: bundled in OpenBLAS
 Availability: https://github.com/OpenMathLib/OpenBLAS/
 License: BSD-3-Clause-Attribution
diff --git a/tools/wheels/LICENSE_win32.txt b/tools/wheels/LICENSE_win32.txt
index f8eaaf1cae25..a2ccce66fbe5 100644
--- a/tools/wheels/LICENSE_win32.txt
+++ b/tools/wheels/LICENSE_win32.txt
@@ -5,7 +5,7 @@ This binary distribution of NumPy also bundles the following software:
 
 
 Name: OpenBLAS
-Files: numpy.libs\libopenblas*.dll
+Files: numpy.libs\libscipy_openblas*.dll
 Description: bundled as a dynamically linked library
 Availability: https://github.com/OpenMathLib/OpenBLAS/
 License: BSD-3-Clause
@@ -41,7 +41,7 @@ License: BSD-3-Clause
 
 
 Name: LAPACK
-Files: numpy.libs\libopenblas*.dll
+Files: numpy.libs\libscipy_openblas*.dll
 Description: bundled in OpenBLAS
 Availability: https://github.com/OpenMathLib/OpenBLAS/
 License: BSD-3-Clause-Attribution
@@ -96,7 +96,7 @@ License: BSD-3-Clause-Attribution
 
 
 Name: GCC runtime library
-Files: numpy.libs\libgfortran*.dll
+Files: numpy.libs\libscipy_openblas*.dll
 Description: statically linked to files compiled with gcc
 Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran
 License: GPL-3.0-with-GCC-exception
@@ -879,24 +879,3 @@ the library.  If this is what you want to do, use the GNU Lesser General
 Public License instead of this License.  But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
 
-Name: libquadmath
-Files: numpy.libs\libopenb*.dll
-Description: statically linked to files compiled with gcc
-Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath
-License: LGPL-2.1-or-later
-
-    GCC Quad-Precision Math Library
-    Copyright (C) 2010-2019 Free Software Foundation, Inc.
-    Written by Francois-Xavier Coudert  
-
-    This file is part of the libquadmath library.
-    Libquadmath is free software; you can redistribute it and/or
-    modify it under the terms of the GNU Library General Public
-    License as published by the Free Software Foundation; either
-    version 2.1 of the License, or (at your option) any later version.
-
-    Libquadmath is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-    Lesser General Public License for more details.
-    https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html
diff --git a/tools/wheels/check_license.py b/tools/wheels/check_license.py
index 7d0ef7921a4e..99db0744d9fb 100644
--- a/tools/wheels/check_license.py
+++ b/tools/wheels/check_license.py
@@ -35,7 +35,7 @@ def main():
 
     # LICENSE.txt is installed in the .dist-info directory, so find it there
     sitepkgs = pathlib.Path(mod.__file__).parent.parent
-    distinfo_path = [s for s in sitepkgs.glob("numpy-*.dist-info")][0]
+    distinfo_path = list(sitepkgs.glob("numpy-*.dist-info"))[0]
 
     # Check license text
     license_txt = distinfo_path / "LICENSE.txt"
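As a further simplification of the cleanup above, `next()` on the glob generator would avoid building the intermediate list entirely (raising `StopIteration` instead of `IndexError` when nothing matches):

    distinfo_path = next(sitepkgs.glob("numpy-*.dist-info"))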
diff --git a/vendored-meson/meson b/vendored-meson/meson
index 6f88e485f27b..0d93515fb826 160000
--- a/vendored-meson/meson
+++ b/vendored-meson/meson
@@ -1 +1 @@
-Subproject commit 6f88e485f27bb0a41d31638f0c55055362e0b1ac
+Subproject commit 0d93515fb826440d19707eee47fd92655fe2f166