diff --git a/.circleci/config.yml b/.circleci/config.yml index e6ec8cc783bd..8c2b443f1e84 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,7 +9,7 @@ _defaults: &defaults docker: # CircleCI maintains a library of pre-built images # documented at https://circleci.com/developer/images/image/cimg/python - - image: cimg/python:3.11.8 + - image: cimg/python:3.11.10 working_directory: ~/repo @@ -60,7 +60,7 @@ jobs: # get newer, pre-release versions of critical packages pip install --progress-bar=off --pre -r requirements/doc_requirements.txt # then install numpy HEAD, which will override the version installed above - spin build --with-scipy-openblas=64 + spin build --with-scipy-openblas=64 -j 2 - run: name: build devdocs w/ref warnings @@ -97,8 +97,8 @@ jobs: # - validates ReST blocks (via validate_rst_syntax) # - checks that all of a module's `__all__` is reflected in the # module-level docstring autosummary - echo calling python tools/refguide_check.py -v - python tools/refguide_check.py -v + echo calling python3 tools/refguide_check.py -v + python3 tools/refguide_check.py -v - persist_to_workspace: root: ~/repo diff --git a/.github/ISSUE_TEMPLATE/typing.yml b/.github/ISSUE_TEMPLATE/typing.yml index a35b339e4883..17eedfae1c6c 100644 --- a/.github/ISSUE_TEMPLATE/typing.yml +++ b/.github/ISSUE_TEMPLATE/typing.yml @@ -1,7 +1,7 @@ name: Static Typing description: Report an issue with the NumPy typing hints. 
title: "TYP: " -labels: [Static typing] +labels: [41 - Static typing] body: - type: markdown diff --git a/.github/pr-prefix-labeler.yml b/.github/pr-prefix-labeler.yml index 4905b502045d..65ed35aa1a11 100644 --- a/.github/pr-prefix-labeler.yml +++ b/.github/pr-prefix-labeler.yml @@ -12,5 +12,5 @@ "REV": "34 - Reversion" "STY": "03 - Maintenance" "TST": "05 - Testing" -"TYP": "static typing" +"TYP": "41 - Static typing" "WIP": "25 - WIP" diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml new file mode 100644 index 000000000000..09e71051556d --- /dev/null +++ b/.github/workflows/compiler_sanitizers.yml @@ -0,0 +1,127 @@ +name: Test with compiler sanitizers + +on: + push: + branches: + - main + pull_request: + branches: + - main + - maintenance/** + +defaults: + run: + shell: bash + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +permissions: + contents: read # to fetch code (actions/checkout) + +jobs: + clang_ASAN: + # To enable this workflow on a fork, comment out: + if: github.repository == 'numpy/numpy' + runs-on: macos-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + - name: Set up pyenv + run: | + git clone https://github.com/pyenv/pyenv.git "$HOME/.pyenv" + PYENV_ROOT="$HOME/.pyenv" + PYENV_BIN="$PYENV_ROOT/bin" + PYENV_SHIMS="$PYENV_ROOT/shims" + echo "$PYENV_BIN" >> $GITHUB_PATH + echo "$PYENV_SHIMS" >> $GITHUB_PATH + echo "PYENV_ROOT=$PYENV_ROOT" >> $GITHUB_ENV + - name: Check pyenv is working + run: + pyenv --version + - name: Set up LLVM + run: | + brew install llvm@19 + LLVM_PREFIX=$(brew --prefix llvm@19) + echo CC="$LLVM_PREFIX/bin/clang" >> $GITHUB_ENV + echo CXX="$LLVM_PREFIX/bin/clang++" >> $GITHUB_ENV + echo LDFLAGS="-L$LLVM_PREFIX/lib" >> $GITHUB_ENV + echo CPPFLAGS="-I$LLVM_PREFIX/include" >> $GITHUB_ENV + - 
name: Build Python with address sanitizer + run: | + CONFIGURE_OPTS="--with-address-sanitizer" pyenv install 3.13 + pyenv global 3.13 + - name: Install dependencies + run: | + pip install -r requirements/build_requirements.txt + pip install -r requirements/ci_requirements.txt + pip install -r requirements/test_requirements.txt + # xdist captures stdout/stderr, but we want the ASAN output + pip uninstall -y pytest-xdist + - name: Build + run: + python -m spin build -j2 -- -Db_sanitize=address + - name: Test + run: | + # pass -s to pytest to see ASAN errors and warnings, otherwise pytest captures them + ASAN_OPTIONS=detect_leaks=0:symbolize=1:strict_init_order=true:allocator_may_return_null=1 \ + python -m spin test -- -v -s --timeout=600 --durations=10 + + clang_TSAN: + # To enable this workflow on a fork, comment out: + if: github.repository == 'numpy/numpy' + runs-on: macos-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + - name: Set up pyenv + run: | + git clone https://github.com/pyenv/pyenv.git "$HOME/.pyenv" + PYENV_ROOT="$HOME/.pyenv" + PYENV_BIN="$PYENV_ROOT/bin" + PYENV_SHIMS="$PYENV_ROOT/shims" + echo "$PYENV_BIN" >> $GITHUB_PATH + echo "$PYENV_SHIMS" >> $GITHUB_PATH + echo "PYENV_ROOT=$PYENV_ROOT" >> $GITHUB_ENV + - name: Check pyenv is working + run: + pyenv --version + - name: Set up LLVM + run: | + brew install llvm@19 + LLVM_PREFIX=$(brew --prefix llvm@19) + echo CC="$LLVM_PREFIX/bin/clang" >> $GITHUB_ENV + echo CXX="$LLVM_PREFIX/bin/clang++" >> $GITHUB_ENV + echo LDFLAGS="-L$LLVM_PREFIX/lib" >> $GITHUB_ENV + echo CPPFLAGS="-I$LLVM_PREFIX/include" >> $GITHUB_ENV + - name: Build Python with thread sanitizer support + run: | + # free-threaded Python is much more likely to trigger races + CONFIGURE_OPTS="--with-thread-sanitizer" pyenv install 3.13t + pyenv global 3.13t + - name: Install dependencies + run: | + # TODO: remove when 
a released cython supports free-threaded python + pip install -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple cython + pip install -r requirements/build_requirements.txt + pip install -r requirements/ci_requirements.txt + pip install -r requirements/test_requirements.txt + # xdist captures stdout/stderr, but we want the TSAN output + pip uninstall -y pytest-xdist + - name: Build + run: + python -m spin build -j2 -- -Db_sanitize=thread + - name: Test + run: | + # These tests are slow, so only run tests in files that do "import threading" to make them count + TSAN_OPTIONS="allocator_may_return_null=1:suppressions=$GITHUB_WORKSPACE/tools/ci/tsan_suppressions.txt" \ + python -m spin test \ + `find numpy -name "test*.py" | xargs grep -l "import threading" | tr '\n' ' '` \ + -- -v -s --timeout=600 --durations=10 diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 6ce78801a5e1..57e5cf53f225 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -152,6 +152,53 @@ jobs: run: | pytest numpy --cov-report=html:build/coverage # TODO: gcov + env: + PYTHONOPTIMIZE: 2 + + + armhf_test: + # Tests NumPy on 32-bit ARM hard-float (armhf) via compatibility mode + # running on aarch64 (ARM 64-bit) GitHub runners. 
+ needs: [smoke_test] + if: github.repository == 'numpy/numpy' + runs-on: ubuntu-22.04-arm + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + + - name: Creates new container + run: | + docker run --name the_container --interactive \ + -v $(pwd):/numpy arm32v7/ubuntu:22.04 /bin/linux32 /bin/bash -c " + apt update && + apt install -y ninja-build cmake git python3 python-is-python3 python3-dev python3-pip python3-venv && + python -m pip install -r /numpy/requirements/build_requirements.txt && + python -m pip install -r /numpy/requirements/test_requirements.txt + " + docker commit the_container the_container + + - name: Meson Build + run: | + docker run --rm -e "TERM=xterm-256color" \ + -v $(pwd):/numpy the_container \ + /bin/script -e -q -c "/bin/linux32 /bin/bash --noprofile --norc -eo pipefail -c ' + cd /numpy && spin build + '" + + - name: Meson Log + if: always() + run: 'cat build/meson-logs/meson-log.txt' + + - name: Run Tests + run: | + docker run --rm -e "TERM=xterm-256color" \ + -v $(pwd):/numpy the_container \ + /bin/script -e -q -c "/bin/linux32 /bin/bash --noprofile --norc -eo pipefail -c ' + cd /numpy && spin test -m full -- --timeout=600 --durations=10 + '" benchmark: needs: [smoke_test] @@ -186,7 +233,7 @@ jobs: - name: Check docstests shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' run: | - pip install scipy-doctest hypothesis matplotlib scipy pytz pandas + pip install scipy-doctest==1.5.1 hypothesis==6.104.1 matplotlib scipy pytz pandas spin check-docs -v spin check-tutorials -v diff --git a/.github/workflows/linux_musl.yml b/.github/workflows/linux_musl.yml index 18a6a5eefe4a..67226278171e 100644 --- a/.github/workflows/linux_musl.yml +++ b/.github/workflows/linux_musl.yml @@ -60,8 +60,6 @@ jobs: pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt # use meson to build and test - # 
the Duse-ilp64 is not needed with scipy-openblas wheels > 0.3.24.95.0 - # spin build --with-scipy-openblas=64 -- -Duse-ilp64=true spin build --with-scipy-openblas=64 spin test -j auto diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 32d2063bd8ec..de7f6f816cee 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -14,6 +14,7 @@ on: branches: - main - maintenance/** + workflow_dispatch: defaults: run: @@ -28,25 +29,15 @@ permissions: jobs: linux_qemu: - # To enable this workflow on a fork, comment out: - if: github.repository == 'numpy/numpy' + # Only workflow_dispatch is enabled on forks. + # To enable this job and subsequent jobs on a fork for other events, comment out: + if: github.repository == 'numpy/numpy' || github.event_name == 'workflow_dispatch' runs-on: ubuntu-22.04 continue-on-error: true strategy: fail-fast: false matrix: BUILD_PROP: - - [ - "armhf", - "arm-linux-gnueabihf", - "arm32v7/ubuntu:22.04", - "-Dallow-noblas=true", - # test_unary_spurious_fpexception is currently skipped - # FIXME(@seiko2plus): Requires confirmation for the following issue: - # The presence of an FP invalid exception caused by sqrt. Unsure if this is a qemu bug or not. 
- "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_unary_spurious_fpexception", - "arm" - ] - [ "ppc64le", "powerpc64le-linux-gnu", @@ -107,7 +98,8 @@ jobs: - name: Initialize binfmt_misc for qemu-user-static run: | - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes + # see https://hub.docker.com/r/tonistiigi/binfmt for available versions + docker run --rm --privileged tonistiigi/binfmt:qemu-v9.2.2-52 --install all - name: Install GCC cross-compilers run: | @@ -115,7 +107,7 @@ jobs: sudo apt install -y ninja-build gcc-${TOOLCHAIN_NAME} g++-${TOOLCHAIN_NAME} gfortran-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@v4.1.2 + uses: actions/cache@v4.2.0 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} @@ -141,7 +133,9 @@ jobs: rm -f /usr/bin/ld.bfd && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-ld.bfd /usr/bin/ld.bfd && rm -f /usr/bin/ninja && ln -s /host/usr/bin/ninja /usr/bin/ninja && git config --global --add safe.directory /numpy && - python -m pip install -r /numpy/requirements/build_requirements.txt && + # No need to build ninja from source, the host ninja is used for the build + grep -v ninja /numpy/requirements/build_requirements.txt > /tmp/build_requirements.txt && + python -m pip install -r /tmp/build_requirements.txt && python -m pip install pytest pytest-xdist hypothesis typing_extensions && rm -f /usr/local/bin/ninja && mkdir -p /usr/local/bin && ln -s /host/usr/bin/ninja /usr/local/bin/ninja " @@ -174,4 +168,3 @@ jobs: cd /numpy && spin test -- -k \"${RUNTIME_TEST_FILTER}\" '" - diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 62fd24a4e337..d40ef9f60f20 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -44,7 +44,7 @@ jobs: echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT - name: Setup compiler cache - uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + uses: 
actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 id: cache-ccache with: path: ${{ steps.prep-ccache.outputs.dir }} @@ -68,7 +68,7 @@ jobs: # ensure we re-solve once a day (since we don't lock versions). Could be # replaced by a conda-lock based approach in the future. - name: Cache conda environment - uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 env: # Increase this value to reset cache if environment.yml has not changed CACHE_NUMBER: 1 diff --git a/.mailmap b/.mailmap index 23a556dd9fc4..f33dfddb6492 100644 --- a/.mailmap +++ b/.mailmap @@ -10,6 +10,7 @@ !8bitmp3 <19637339+8bitmp3@users.noreply.github.com> !Algorithmist-Girl <36552319+Algorithmist-Girl@users.noreply.github.com> !DWesl <22566757+DWesl@users.noreply.github.com> +!Dreamge !Endolith !GalaxySnail !Illviljan <14371165+Illviljan@users.noreply.github.com> @@ -20,13 +21,17 @@ !Scian <65375075+hoony6134@users.noreply.github.com> !Searchingdays !amagicmuffin <2014wcheng@gmail.com> +!bersbersbers <12128514+bersbersbers@users.noreply.github.com> !code-review-doctor !cook-1229 <70235336+cook-1229@users.noreply.github.com> !dg3192 <113710955+dg3192@users.noreply.github.com> !ellaella12 !ellaella12 <120079323+ellaella12@users.noreply.github.com> +!fengluoqiuwu +!fengluoqiuwu <163119756+fengluoqiuwu@users.noreply.github.com> !h-vetinari !h6197627 <44726212+h6197627@users.noreply.github.com> +!hutauf !jbCodeHub !juztamau5 !legoffant <58195095+legoffant@users.noreply.github.com> @@ -37,7 +42,9 @@ !mcp292 !mgunyho <20118130+mgunyho@users.noreply.github.com> !msavinash <73682349+msavinash@users.noreply.github.com> +!musvaage !mykykh <49101849+mykykh@users.noreply.github.com> +!nullSoup <34267803+nullSoup@users.noreply.github.com> !ogidig5 <82846833+ogidig5@users.noreply.github.com> !partev !pkubaj @@ -58,6 +65,7 @@ !yetanothercheer Aaron Baecker Adrin Jalali +Abraham Medina Arun Kota Arun Kota Arun Kota 
Aarthi Agurusa @@ -142,7 +150,9 @@ Ashutosh Singh Ashutosh Singh <55102089+Ashutosh619-sudo@users.noreply.github.com> Åsmund Hjulstad Auke Wiggers +Austin Ran <504977925@qq.com> Badhri Narayanan Krishnakumar +Baskar Gopinath Bhavuk Kalra Bhavuk Kalra Bangcheng Yang @@ -154,6 +164,7 @@ Ben Woodruff Benjamin Root Benjamin Root weathergod Bernardt Duvenhage +Benoit Prabel Bernie Gray Bertrand Lefebvre Bharat Raghunathan @@ -193,6 +204,7 @@ Chris Vavaliaris Christian Clauss Christopher Dahlin Christopher Hanley +Christoph Buchner Christoph Gohlke Christoph Gohlke Christoph Gohlke cgholke @@ -288,6 +300,8 @@ Gregory R. Lee Gregory R. Lee Guo Ci guoci Guo Shuai +Habiba Hye +Habiba Hye <145866308+HabibiHye@users.noreply.github.com> Hameer Abbasi Hannah Aizenman Han Genuit @@ -300,11 +314,13 @@ Hiroyuki V. Yamazaki Hugo van Kemenade Iantra Solari I-Shen Leong +Ishan Purekar Imen Rajhi Inessa Pawson Irina Maria Mocan <28827042+IrinaMaria@users.noreply.github.com> Irvin Probst -Ishan Koradia <39583356+Ishankoradia@users.noreply.github.com> +Ishan Koradia +Ishan Koradia <39583356+Ishankoradia@users.noreply.github.com> Ivan Meleshko Isabela Presedo-Floyd Ganesh Kathiresan @@ -345,32 +361,34 @@ Jérôme Richard Jessé Pires Jessi J Zhao <35235453+jessijzhao@users.noreply.github.com> -João Fontes Gonçalves -Johann Rohwer -Johann Rohwer jmrohwer -Johnathon Cusick Jhong-Ken Chen (陳仲肯) Jhong-Ken Chen (陳仲肯) <37182101+kennychenfs@users.noreply.github.com> +Johann Faouzi +Johann Rohwer +Johann Rohwer jmrohwer Johannes Hampp <42553970+euronion@users.noreply.github.com> +Johannes Kaisinger +Johannes Kaisinger Johannes Schönberger -Johann Faouzi John Darbyshire <24256554+attack68@users.noreply.github.com> <24256554+attack68@users.noreply.github.com> John Hagen John Kirkham John Kirkham +Johnathon Cusick Johnson Sun <20457146+j3soon@users.noreply.github.com> Jonas I. Liechti Jonas I. Liechti Jonas I. 
Liechti +Joren Hammudoglu +Jory Klaverstijn +Jory Klaverstijn <63673224+JoryKlaverstijn@users.noreply.github.com> Joseph Fox-Rabinovitz Joseph Fox-Rabinovitz Joseph Fox-Rabinovitz Joseph Martinot-Lagarde Joshua Himmens Joyce Brum -Joren Hammudoglu -Jory Klaverstijn -Jory Klaverstijn <63673224+JoryKlaverstijn@users.noreply.github.com> +João Fontes Gonçalves Julia Poo Julia Poo <57632293+JuliaPoo@users.noreply.github.com> Julian Taylor @@ -381,11 +399,13 @@ Julien Schueller Junyan Ou Justus Magin Justus Magin +Kai Germaschewski Kai Striega Kai Striega Kasia Leszek Kasia Leszek <39829548+katleszek@users.noreply.github.com> Karan Dhir +Karel Planken <71339309+kplanken@users.noreply.github.com> Karthik Gangula <56480632+gangula-karthik@users.noreply.github.com> Karthik Kaiplody Keller Meier @@ -398,6 +418,7 @@ Kerem Hallaç Khaled Ben Abdallah Okuda Kiko Correoso kikocorreoso Kiko Correoso kikocorreoso +Kira Prokopenko Konrad Kapp Kristoffer Pedersen Kristoffer Pedersen @@ -526,6 +547,7 @@ Omar Ali Omid Rajaei Omid Rajaei <89868505+rajaeinet@users.noreply.github.com> Ondřej Čertík +Oscar Armas-Luy Óscar Villellas Guillén Pablo Losada Pablo Losada <48804010+TheHawz@users.noreply.github.com> @@ -546,6 +568,7 @@ Pearu Peterson Pete Peeradej Tanruangporn Peter Bell Peter J Cock +Peter Kämpf Peyton Murray Phil Elson Pierre GM @@ -608,6 +631,7 @@ Sebastian Schleehauf Serge Guelton Sergei Vorfolomeev <39548292+vorfol@users.noreply.github.com> Shuangchi He +Shaurya Barkund <64537538+Shaurya19@users.noreply.github.com> Shubham Gupta Shubham Gupta <63910248+shubham11941140@users.noreply.github.com> Shekhar Prasad Rajak @@ -625,6 +649,7 @@ Slava Gorloff <31761951+gorloffslava@users.noreply.github.com> Søren Rasmussen <47032123+sorenrasmussenai@users.noreply.github.com> Spencer Hill Srimukh Sripada +Stan Ulbrych <89152624+StanFromIreland@users.noreply.github.com> Stefan Behnel Stefan van der Walt Stefan van der Walt @@ -659,10 +684,13 @@ Toshiki Kataoka Travis Oliphant Travis 
Oliphant Travis Oliphant +Vahid Tavanashad <120411540+vtavana@users.noreply.github.com> Valentin Haenel Valentin Haenel Vardhaman Kalloli <83634399+cyai@users.noreply.github.com> Varun Nayyar +Victor Herdeiro +Vijayakumar Z Vinith Kishore Vinith Kishore <85550536+vinith2@users.noreply.github.com> Vrinda Narayan @@ -683,6 +711,7 @@ Xiangyi Wang Yamada Fuyuka Yang Hau Yang Hau +Yang Wang Yash Pethe Yash Pethe <83630710+patient74@users.noreply.github.com> Yashasvi Misra diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 2393a96d3f86..81bada011c31 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -19,7 +19,7 @@ stages: jobs: - job: Skip pool: - vmImage: 'ubuntu-20.04' + vmImage: 'ubuntu-22.04' variables: DECODE_PERCENTS: 'false' RET: 'true' @@ -40,7 +40,7 @@ stages: - job: Lint condition: and(succeeded(), eq(variables['Build.Reason'], 'PullRequest')) pool: - vmImage: 'ubuntu-20.04' + vmImage: 'ubuntu-22.04' steps: - task: UsePythonVersion@0 inputs: @@ -59,7 +59,7 @@ stages: - job: Linux_Python_310_32bit_full_with_asserts pool: - vmImage: 'ubuntu-20.04' + vmImage: 'ubuntu-22.04' steps: - script: | git submodule update --init diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py index 42d32a3ce3b5..895c8e931590 100644 --- a/benchmarks/benchmarks/bench_ufunc.py +++ b/benchmarks/benchmarks/bench_ufunc.py @@ -16,12 +16,12 @@ 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', 'left_shift', 'less', 'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', 'logical_or', - 'logical_xor', 'matmul', 'maximum', 'minimum', 'mod', 'modf', - 'multiply', 'negative', 'nextafter', 'not_equal', 'positive', + 'logical_xor', 'matmul', 'matvec', 'maximum', 'minimum', 'mod', + 'modf', 'multiply', 'negative', 'nextafter', 'not_equal', 'positive', 'power', 'rad2deg', 'radians', 'reciprocal', 'remainder', 'right_shift', 'rint', 'sign', 'signbit', 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 
'tan', 'tanh', - 'true_divide', 'trunc', 'vecdot'] + 'true_divide', 'trunc', 'vecdot', 'vecmat'] arrayfuncdisp = ['real', 'round'] for name in ufuncs: diff --git a/doc/changelog/2.2.0-changelog.rst b/doc/changelog/2.2.0-changelog.rst new file mode 100644 index 000000000000..b82a3d03b4fc --- /dev/null +++ b/doc/changelog/2.2.0-changelog.rst @@ -0,0 +1,437 @@ + +Contributors +============ + +A total of 106 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !Dreamge + +* !bersbersbers + +* !fengluoqiuwu + +* !h-vetinari +* !hutauf + +* !musvaage + +* !nullSoup + +* Aarni Koskela + +* Abhishek Kumar + +* Abraham Medina + +* Aditi Juneja + +* Adrien Corenflos + +* Agriya Khetarpal +* Ajay Kumar Janapareddi +* Akula Guru Datta + +* Amit Subhash Chejara + +* Andrew Nelson +* Anne Gunn +* Austin Ran + +* Ben Walsh +* Benjamin A. Beasley + +* Benoit Prabel + +* Charles Harris +* Chris Fu (傅立业) +* Chris Sidebottom +* Christian Lorentzen +* Christopher Sidebottom +* Clément Robert +* Dane Reimers + +* Dimitri Papadopoulos Orfanos +* Evgeni Burovski +* GUAN MING +* Habiba Hye + +* Harry Zhang + +* Hugo van Kemenade +* Ian Harris + +* Isaac Warren + +* Ishan Koradia + +* Ishan Purekar + +* Jake VanderPlas +* Jianyu Wen + +* Johannes Kaisinger +* John Kirkham +* Joren Hammudoglu +* João Eiras + +* KM Khalid Saifullah + +* Karel Planken + +* Katie Rust + +* Khem Raj +* Kira Prokopenko + +* Lars Grüter +* Linus Sommer +* Lucas Colley +* Luiz Eduardo Amaral +* Luke Aarohi + +* Marcel Telka + +* Mark Harfouche +* Marten van Kerkwijk +* Maryanne Wachter + +* Mateusz Sokół +* Matt Haberland +* Matthias Diener + +* Matthieu Darbois +* Matti Picus +* Maximilian Weigand + +* Melissa Weber Mendonça +* Michael Davidsaver + +* Nathan Goldbaum +* Nicolas Tessore + +* Nitish Satyavolu + +* Oscar Armas-Luy + +* Peter Hawkins +* Peter Kämpf + +* Pieter Eendebak +* Raghu Rajan + +* Raghuveer Devulapalli +* Ralf Gommers +* Robert 
Kern +* Rohit Goswami +* Ross Barnowski +* Ryan Teoh + +* Santhana Mikhail Antony S + +* Sayed Adel +* Sebastian Berg +* Sebastian Vittersø + +* Sebin Thomas + +* Serge Panev + +* Shaurya Barkund + +* Shiv Katira + +* Simon Altrogge +* Slava Gorloff + +* Slobodan Miletic + +* Soutrik Bandyopadhyay + +* Stan Ulbrych + +* Stefan van der Walt +* Tim Hoffmann +* Timo Röhling +* Tyler Reddy +* Vahid Tavanashad + +* Victor Herdeiro + +* Vijayakumar Z + +* Warren Weckesser +* Xiao Yuan + +* Yashasvi Misra +* bilderbuchi + +* dependabot[bot] + +Pull requests merged +==================== + +A total of 317 pull requests were merged for this release. + +* `#14622 `__: BUG: fix datetime64/timedelta64 hash and match Python +* `#15181 `__: ENH: Add nd-support to trim_zeros +* `#17780 `__: ENH, BLD: Define RISCV-32 support +* `#23547 `__: DOC: Fix a typo in description and add an example of ``numpy.tensordot`` +* `#25984 `__: BUG: Allow fitting of degree zero polynomials with Polynomial.fit +* `#26398 `__: DOC: order of indices returned in tril_indices and triu_indices +* `#26406 `__: DOC: Changed vdot docs as suggested +* `#26570 `__: CI, BLD: Use ``cibuildwheel`` to build WASM NumPy wheels +* `#26642 `__: DOC: Add examples to ``np.char`` +* `#26855 `__: TYP: improved ``numpy.frompyfunc`` type hints +* `#26857 `__: MAINT: Start applying ruff/Pycodestyle rules +* `#26865 `__: TYP: add missing annotations for ``numpy.object_.__new__`` +* `#26941 `__: TYP: Non-distributive ``numpy.generic`` type args. +* `#26944 `__: TYP: Annotate ``numpy._core._type_aliases`` . 
+* `#26979 `__: TYP: Explicit ``numpy.__all__`` in the stubs +* `#26994 `__: TYP: Typing fixes for ``numpy.iinfo`` & ``numpy.finfo`` +* `#27049 `__: BUG: f2py: better handle filtering of public/private subroutines +* `#27088 `__: WHL: bump (musl) linux image [wheel build] +* `#27100 `__: TYP: Fixed & improved type hints for ``numpy.histogram2d`` +* `#27101 `__: TST, DOC: add doc and test for transpose axes with negative indices +* `#27116 `__: DOC: update NEP 50 draft status to "Final" +* `#27119 `__: ENH: Use ``PyObject_GetOptionalAttr`` +* `#27132 `__: TYP: Assume that ``typing_extensions`` is always available in... +* `#27134 `__: REL: Prepare main for 2.2.0 development +* `#27139 `__: TYP: Fixed & improved ``numpy.dtype.__new__`` +* `#27140 `__: MAINT: Scipy openblas 0.3.27.44.4 +* `#27143 `__: BUG: Do not accidentally store dtype metadata in ``np.save`` +* `#27145 `__: ENH: fix thread-unsafe C API usages +* `#27147 `__: BLD: use smaller scipy-openblas builds +* `#27148 `__: BUG: Raise if histogram cannot create finite bin sizes +* `#27150 `__: TYP: Sane defaults for the platform-specific ``NBitBase`` types. +* `#27152 `__: TYP: Simplified ufunc imports in ``numpy._typing`` +* `#27153 `__: TYP: Fix incompatible overrides in the ``numpy._typing._ufunc``... 
+* `#27154 `__: TYP: Use ``typing_extensions.Self`` in the ``numpy`` stubs +* `#27156 `__: MAINT: Remove any promotion-state switching logic +* `#27157 `__: TYP: add td64 overload for ``np.mean`` +* `#27158 `__: CI: Re-enable nightly OpenBLAS test runs +* `#27160 `__: DEP: Finalize ``bool(empty_array)`` deprecation +* `#27164 `__: MAINT: use npy_argparse for einsum +* `#27168 `__: DOC: add td64 example in ``np.mean`` +* `#27171 `__: TYP: Shape-typed array constructors: ``numpy.{empty,zeros,ones,full}`` +* `#27177 `__: TYP: 1-d ``numpy.arange`` return shape-type +* `#27178 `__: TYP,TST: Bump mypy to 1.11.1 +* `#27179 `__: TYP: Improved ``numpy.piecewise`` type-hints +* `#27182 `__: REV: Revert undef I and document it +* `#27184 `__: BUILD: update to OpenBLAS 0.3.28 +* `#27187 `__: MAINT: update default NPY_FEATURE_VERSION after dropping py39 +* `#27189 `__: MAINT: improve download script +* `#27202 `__: BUG: Fix NPY_RAVEL_AXIS on backwards compatible NumPy 2 builds +* `#27203 `__: DOC: update PyArray_CheckAxis doc +* `#27207 `__: TYP: Deprecate calling ``numpy.save`` with ``fix_imports`` (PEP... +* `#27208 `__: TYP: Disallow scalars and 0d-arrays in ``numpy.nonzero`` +* `#27210 `__: TYP: Semi-transparent ``numpy.shape`` shape-type annotations. +* `#27211 `__: TYP: Stop using ``Any`` as shape-type default +* `#27215 `__: MAINT: Bump github/codeql-action from 3.26.0 to 3.26.2 +* `#27218 `__: DEV: Add ``.editorconfig`` rules for Python +* `#27219 `__: TYP: Replace ``ellipsis`` with ``types.EllipsisType`` +* `#27220 `__: TYP: Fixed & improved ``TypeVar`` use for ``numpy.char.chararray`` +* `#27221 `__: MAINT: Bump actions/upload-artifact from 4.3.3 to 4.3.6 +* `#27223 `__: DOC: add docs on thread safety in NumPy +* `#27226 `__: BUG: Fix ``PyArray_ZeroContiguousBuffer`` (resize) with struct... +* `#27228 `__: DOC: Remove obsolete note from the top of the 2.0.0 release notes. 
+* `#27235 `__: MAINT: MSVC does not support #warning directive +* `#27237 `__: TYP: Fix several typing issues in ``numpy.polynomial`` +* `#27238 `__: DOC: update ``np.unique`` docstring +* `#27242 `__: MAINT: Update main after 2.1.0 release. +* `#27246 `__: MAINT: Bump github/codeql-action from 3.26.2 to 3.26.3 +* `#27247 `__: DOC: update documentation release process +* `#27249 `__: BUG: fix reference counting bug in __array_interface__ implementation +* `#27255 `__: BUG: revert unintended change in the return value of set_printoptions +* `#27261 `__: TST: Add regression test for missing descr in array-interface +* `#27262 `__: BUG: Fix #27256 and #27257 +* `#27268 `__: MAINT: Bump github/codeql-action from 3.26.3 to 3.26.4 +* `#27272 `__: ENH: make check-{docs,tutorials} fail on dtype mismatch +* `#27275 `__: BUG: Fix array_equal for numeric and non-numeric scalar types +* `#27277 `__: DOC/DEV/CI: mambaforge -> miniforge +* `#27281 `__: MAINT: Bump github/codeql-action from 3.26.4 to 3.26.5 +* `#27284 `__: BLD: cp311- macosx_arm64 wheels [wheel build] +* `#27286 `__: MAINT: Update main after the 2.0.2 release +* `#27289 `__: MAINT: Start applying ruff rules (RUF) +* `#27290 `__: MAINT: Keep applying ruff/pyupgrade rules (UP) +* `#27291 `__: DOC, MAINT: Fix new typos found by codespell +* `#27292 `__: MAINT: Start applying ruff/flake8-type-checking rules (TCH) +* `#27293 `__: MAINT: Keep applying ruff/flake8-bugbear rules (B) +* `#27294 `__: BUILD: refactor circleci to use spin [skip actions][skip azp][skip... 
+* `#27295 `__: MAINT: Start applying rruff/flake8-pie rules (PIE) +* `#27296 `__: MAINT: Start applying ruff/flake8-comprehensions rules (C4) +* `#27297 `__: MAINT: Apply ruff/flake8-raise rules (RSE) +* `#27298 `__: MAINT: Apply ruff/flynt rules (FLY) +* `#27302 `__: BUG: Fix bug in ``doc/neps/tools/build_index.py`` +* `#27307 `__: MAINT: Apply ruff/pycodestyle warning rules (W) +* `#27311 `__: MAINT: Bump actions/setup-python from 5.1.1 to 5.2.0 +* `#27312 `__: MAINT: Bump github/codeql-action from 3.26.5 to 3.26.6 +* `#27316 `__: BUILD: update pypy test version +* `#27320 `__: MAINT: increase max line length from 79 to 88, upgrade pycodestyle +* `#27322 `__: DOC: Removed reference to deprecated "newshape" parameter in... +* `#27323 `__: TYP: add ``ma.zeros_like`` and ``ma.ones_like`` typing +* `#27326 `__: MAINT: Bump actions/upload-artifact from 4.3.6 to 4.4.0 +* `#27330 `__: BLD: Win-arm64 cross compile workflow +* `#27331 `__: MAINT: GitHub Actions: Replace deprecated macos-12 with macos-latest +* `#27332 `__: MAINT: Update main after 2.1.1 release. +* `#27334 `__: TYP: Concrete ``float64`` and ``complex128`` scalar types with... +* `#27335 `__: ENH: Add ``allow_pickle`` flag to ``savez`` +* `#27344 `__: MAINT: fix typos +* `#27346 `__: BUG,TYP: Allow subscripting ``iinfo`` and ``finfo`` generic types... +* `#27347 `__: DOC: Mention that c is reassigned but still points to a (quickstart) +* `#27353 `__: MNT, CI: Use separate jobs for WASM wheel builds/uploads +* `#27355 `__: MAINT: Bump actions/setup-python from 5.1.1 to 5.2.0 +* `#27356 `__: MAINT: Bump actions/upload-artifact from 4.3.6 to 4.4.0 +* `#27359 `__: MAINT: fix typo in random.binomial +* `#27360 `__: BUG: fix _shrink edge case in np.ma.mask_or +* `#27361 `__: BUILD: fix missing include for std::ptrdiff_t for C++23 language... 
+* `#27363 `__: DOC: Remove reshape from appearing twice in toctree +* `#27364 `__: DOC: Update np.\*stack doc to reflect behavior +* `#27365 `__: MAINT: Bump deadsnakes/action from 3.1.0 to 3.2.0 +* `#27369 `__: DOC: fix incorrect definitions +* `#27372 `__: CI: Update cirrus nightly token +* `#27376 `__: MAINT: Fix a few typos - and sometimes improve wording +* `#27381 `__: DOC: add vecdot to 'See also' of np.dot and np.inner +* `#27384 `__: MAINT: Fix a few more typos +* `#27385 `__: DOC: Update np.unique_all example to demonstrate namedtuple output +* `#27387 `__: DOC: Clarify np.searchsorted documentation and add example for... +* `#27390 `__: MAINT: Bump github/codeql-action from 3.26.6 to 3.26.7 +* `#27391 `__: MAINT: Bump pypa/cibuildwheel from 2.20.0 to 2.21.0 +* `#27392 `__: BUG: apply critical sections around populating the dispatch cache +* `#27403 `__: DOC: Fix minor issues in arrays.promotion.rst +* `#27406 `__: BUG: Stub out ``get_build_msvc_version`` if ``distutils.msvccompiler``... +* `#27408 `__: DOC: more informative _excluded_ argument explanation in np.vectorize +* `#27412 `__: MAINT: Bump pypa/cibuildwheel from 2.21.0 to 2.21.1 +* `#27414 `__: MAINT: add Python 3.13 to classifiers +* `#27417 `__: TYP: Allow callable ``converters`` arg in ``numpy.loadtxt`` +* `#27418 `__: TYP: Fix default return dtype of ``numpy.random.Generator.integers``... 
+* `#27419 `__: TYP: Modernized ``numpy.dtypes`` annotations +* `#27420 `__: TYP: Optional 2nd ``numpy.complexfloating`` type parameter +* `#27421 `__: BUG: Add regression test for gh-27273 +* `#27423 `__: TYP: Add missing type arguments +* `#27424 `__: DOC: Add release notes for #27334 +* `#27425 `__: MAINT: Use correct Python interpreter in tests +* `#27426 `__: MAINT: Bump github/codeql-action from 3.26.7 to 3.26.8 +* `#27427 `__: TYP: Fixed & improved type-hinting for ``any`` and ``all`` +* `#27429 `__: BLD: pin setuptools to avoid breaking numpy.distutils +* `#27430 `__: TYP: Fix type of ``copy`` argument in ``ndarray.reshape`` +* `#27431 `__: BUG: Allow unsigned shift argument for np.roll +* `#27434 `__: ENH: make np.dtype(scalar_type) return the default dtype instance +* `#27438 `__: BUG: Disable SVE VQSort +* `#27440 `__: DOC: Add a link to the migration guide for the deprecation warning... +* `#27441 `__: DOC: remove old versionadded comments from arrays.classes.rst +* `#27442 `__: DOC: Remove old versionchanged directives from config.rst +* `#27443 `__: updated the version of mean param from the release notes (2.0.0) +* `#27444 `__: TST: Added the test case for masked array tofile failing +* `#27445 `__: DOC: removed older versionadded directives to ufuncs.rst +* `#27448 `__: DOC: Example for char.array +* `#27453 `__: DOC: Added docstring for numpy.ma.take() function. +* `#27454 `__: DOC: Remove outdated versionadded/changed directives +* `#27458 `__: MAINT: Bump github/codeql-action from 3.26.8 to 3.26.9 +* `#27464 `__: DOC: Fix a copy-paste mistake in the cumulative_sum docstring. +* `#27465 `__: DOC: update ndindex reference in np.choose docstring +* `#27466 `__: BUG: rfftn axis bug +* `#27469 `__: DOC: Added ``CONTRIBUTING.rst`` +* `#27470 `__: TYP: Add type stubs for stringdtype in np.char and np.strings +* `#27472 `__: MAINT: Check for SVE support on demand +* `#27475 `__: CI: use PyPI not scientific-python-nightly-wheels for CI doc... 
+* `#27478 `__: BUG: Fix extra decref of PyArray_UInt8DType. +* `#27482 `__: Show shape any time it cannot be inferred in repr +* `#27485 `__: MAINT: Bump github/codeql-action from 3.26.9 to 3.26.10 +* `#27486 `__: MAINT: Bump scientific-python/upload-nightly-action from 0.5.0... +* `#27490 `__: API: register NEP 35 functions as array_functions +* `#27491 `__: MAINT: Bump mamba-org/setup-micromamba from 1.9.0 to 1.10.0 +* `#27495 `__: MAINT: Bump pypa/cibuildwheel from 2.21.1 to 2.21.2 +* `#27496 `__: MAINT: Bump mamba-org/setup-micromamba from 1.10.0 to 2.0.0 +* `#27497 `__: DOC: Correct selected C docstrings to eliminate warnings +* `#27499 `__: DOC: fix missing arguments (copy and device) from asanyarray's... +* `#27502 `__: MAINT: Bump github/codeql-action from 3.26.10 to 3.26.11 +* `#27503 `__: BUG: avoid segfault on bad arguments in ndarray.__array_function__ +* `#27504 `__: ENH: Allow ``ndarray.__array_function__`` to dispatch functions... +* `#27508 `__: MAINT: Pin setuptools for testing [wheel build] +* `#27510 `__: TYP: Mark stub-only classes as ``@type_check_only`` +* `#27511 `__: TYP: Annotate type aliases without annotation +* `#27513 `__: MAINT: Update main after NumPy 2.1.2 release +* `#27517 `__: BENCH: Add benchmarks for np.non_zero +* `#27518 `__: TST: Add tests for np.nonzero with different input types +* `#27520 `__: TYP: Remove unused imports in the stubs +* `#27521 `__: TYP: Fill in the missing ``__all__`` exports +* `#27524 `__: MAINT: Bump actions/cache from 4.0.2 to 4.1.0 +* `#27525 `__: MAINT: Bump actions/upload-artifact from 4.4.0 to 4.4.1 +* `#27526 `__: MAINT: Bump github/codeql-action from 3.26.11 to 3.26.12 +* `#27532 `__: MAINT: Bump actions/cache from 4.1.0 to 4.1.1 +* `#27534 `__: BUG: Fix user dtype can-cast with python scalar during promotion +* `#27535 `__: MAINT: Bump pypa/cibuildwheel from 2.21.2 to 2.21.3 +* `#27536 `__: MAINT: Bump actions/upload-artifact from 4.4.1 to 4.4.3 +* `#27549 `__: BUG: weighted quantile for some 
zero weights +* `#27550 `__: BLD: update vendored Meson to 1.5.2 +* `#27551 `__: MAINT: Bump github/codeql-action from 3.26.12 to 3.26.13 +* `#27553 `__: BLD: rename ``meson_options.txt`` to ``meson.options`` +* `#27555 `__: DEV: bump ``python`` to 3.12 in environment.yml +* `#27556 `__: DOC: Clarify use of standard deviation in mtrand.pyx +* `#27557 `__: BUG: Fix warning "differs in levels of indirection" in npy_atomic.h... +* `#27558 `__: MAINT: distutils: remove obsolete search for ``ecc`` executable +* `#27560 `__: CI: start building Windows free-threaded wheels +* `#27564 `__: BUILD: satisfy gcc-13 pendantic errors +* `#27567 `__: BUG: handle possible error for PyTraceMallocTrack +* `#27568 `__: BUILD: vendor tempita from Cython +* `#27579 `__: BUG: Adjust numpy.i for SWIG 4.3 compatibility +* `#27586 `__: MAINT: Update Highway to latest +* `#27587 `__: BLD: treat SVML object files better to avoid compiler warnings +* `#27595 `__: DOC: Clarify obj parameter types in numpy.delete documentation +* `#27598 `__: DOC: add examples to ctypeslib +* `#27602 `__: Update documentation for floating-point precision and determinant... +* `#27604 `__: DOC: Fix rendering in docstring of nan_to_num +* `#27612 `__: ENH: Add comments to ``string_fastsearch.h`` , rename some C-methods +* `#27613 `__: BUG: Fix Linux QEMU CI workflow +* `#27615 `__: ENH: Fix np.insert to handle boolean arrays as masks +* `#27617 `__: DOC: Update the RELEASE_WALKTHROUGH.rst file. 
+* `#27619 `__: MAINT: Bump actions/cache from 4.1.1 to 4.1.2 +* `#27620 `__: MAINT: Bump actions/dependency-review-action from 4.3.4 to 4.3.5 +* `#27621 `__: MAINT: Bump github/codeql-action from 3.26.13 to 3.27.0 +* `#27627 `__: ENH: Re-enable VSX from build targets for sin/cos +* `#27630 `__: ENH: Extern memory management to Cython +* `#27634 `__: MAINT: Bump actions/setup-python from 5.2.0 to 5.3.0 +* `#27636 `__: BUG: fixes for StringDType/unicode promoters +* `#27643 `__: BUG : avoid maximum fill value of datetime and timedelta return... +* `#27644 `__: DOC: Remove ambiguity in docs for ndarray.byteswap() +* `#27650 `__: BLD: Do not set __STDC_VERSION__ to zero during build +* `#27652 `__: TYP,TST: Bump ``mypy`` from ``1.11.1`` to ``1.13.0`` +* `#27653 `__: TYP: Fix Array API method signatures +* `#27659 `__: TYP: Transparent ``ndarray`` unary operator method signatures +* `#27661 `__: BUG: np.cov transpose control +* `#27663 `__: MAINT: fix wasm32 runtime type error in numpy._core +* `#27664 `__: MAINT: Bump actions/dependency-review-action from 4.3.5 to 4.4.0 +* `#27665 `__: ENH: Re-enable VXE from build targets for sin/cos +* `#27666 `__: BUG: Fix a reference count leak in npy_find_descr_for_scalar. +* `#27667 `__: TYP: Allow returning non-array-likes from the ``apply_along_axis``... +* `#27676 `__: CI: Attempt to fix CI on 32 bit linux +* `#27678 `__: DOC: fix incorrect versionadded for np.std +* `#27680 `__: MAINT: fix typo / copy paste error +* `#27681 `__: TYP: Fix some inconsistencies in the scalar methods and properties +* `#27683 `__: TYP: Improve ``np.sum`` and ``np.mean`` return types with given... +* `#27684 `__: DOC: fix spelling of "reality" in ``_nanfunctions_impl.pyi`` +* `#27685 `__: MAINT: Drop useless shebang +* `#27691 `__: TYP: Use ``_typeshed`` to clean up the stubs +* `#27693 `__: MAINT: Update main after 2.1.3 release. 
+* `#27695 `__: BUG: Fix multiple modules in F2PY and COMMON handling +* `#27702 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.0.4 to 3.1.0 +* `#27705 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.0 to 2.0.1 +* `#27706 `__: DOC: Remove empty notes +* `#27707 `__: CI: Set up free-threaded CI using quansight-labs/setup-python +* `#27708 `__: DOC: Remove version notes +* `#27714 `__: DOC: fix a mistake in the docstring of vector_norm +* `#27715 `__: BUG: fix incorrect output descriptor in fancy indexing +* `#27716 `__: ENH: Make ``__module__`` attribute coherent across API +* `#27721 `__: DOC: fix name of shape parameter kappa of von Mises distribution +* `#27723 `__: BUG: Allow empty memmaps in most situations +* `#27724 `__: MAINT: Bump github/codeql-action from 3.27.0 to 3.27.1 +* `#27728 `__: BUG: Handle ``--lower`` for F2PY directives and callbacks +* `#27729 `__: BUG: f2py: fix issues with thread-local storage define +* `#27730 `__: TST: Add an F2PY check for exposing variables without functions +* `#27731 `__: BUG: Fix ``fortranname`` for functions +* `#27734 `__: Fix documentation for the chi-square distribution +* `#27735 `__: ENH: Add a ``__dict__`` to ufunc objects and allow overriding... +* `#27736 `__: TYP: Optional ``numpy.number`` type parameters +* `#27742 `__: MAINT: Bump github/codeql-action from 3.27.1 to 3.27.2 +* `#27743 `__: DOC: Fix typos in subclassing documentation +* `#27746 `__: DOC: Added additional guidance for compiling in Windows +* `#27750 `__: TYP: Fix ``ndarray.item()`` and improve ``ndarray.tolist()`` +* `#27753 `__: TYP: Fix the annotations of ``ndarray.real`` and ``ndarray.imag`` +* `#27754 `__: MAINT: Bump github/codeql-action from 3.27.2 to 3.27.3 +* `#27755 `__: TYP: Annotate ``__setitem__`` , ``__contains__`` and ``__iter__``... +* `#27756 `__: TYP: 1-d shape-typing for ``ndarray.flatten`` and ``ravel`` +* `#27757 `__: TYP: Remove the non-existent ``bitwise_count`` methods of ``ndarray``... 
+* `#27758 `__: TYP: Remove ``ndarray`` binop overloads for ``NDArray[Never]`` +* `#27763 `__: DOC: Note that allow-pickle is not safe also in error +* `#27765 `__: TYP: Shape-typed ``ndarray`` inplace binary operator methods. +* `#27766 `__: MAINT: Bump github/codeql-action from 3.27.3 to 3.27.4 +* `#27767 `__: TYP: Support shape-typing in ``reshape`` and ``resize`` +* `#27769 `__: TYP: Towards a less messy ``__init__.pyi`` +* `#27770 `__: TYP: Fix incorrect baseclass of ``linalg.LinAlgError`` +* `#27771 `__: ENH: ``default_rng`` coerces ``RandomState`` to ``Generator`` +* `#27773 `__: BUG: Fix repeat, accumulate for strings and accumulate API logic +* `#27775 `__: TYP: Fix undefined type-parameter name +* `#27776 `__: TYP: Fix method overload issues in ``ndarray`` and ``generic`` +* `#27778 `__: TYP: Generic ``numpy.generic`` type parameter for the ``item()``... +* `#27779 `__: TYP: Type hints for ``numpy.__config__`` +* `#27788 `__: DOC: Make wording in absolute beginners guide more beginner friendly +* `#27790 `__: TYP: Generic ``timedelta64`` and ``datetime64`` scalar types +* `#27792 `__: TYP: Generic ``numpy.bool`` and statically typed boolean logic +* `#27794 `__: MAINT: Upgrade to spin 0.13 +* `#27795 `__: update pythoncapi-compat to latest HEAD +* `#27800 `__: BUG: Ensure context path is taken in masked array array-wrap +* `#27802 `__: BUG: Ensure that same-kind casting works for uints (mostly) +* `#27803 `__: MAINT: Bump github/codeql-action from 3.27.4 to 3.27.5 +* `#27806 `__: DOC: Improve choice() documentation about return types +* `#27807 `__: BUG,ENH: Fix internal ``__array_wrap__`` for direct calls +* `#27808 `__: ENH: Ensure hugepages are also indicated for calloc allocations +* `#27809 `__: BUG: Fix array flags propagation in boolean indexing +* `#27810 `__: MAINT: Bump actions/dependency-review-action from 4.4.0 to 4.5.0 +* `#27812 `__: BUG: ``timedelta64.__[r]divmod__`` segfaults for incompatible... 
+* `#27813 `__: DOC: fix broken reference in arrays.classes.rst +* `#27815 `__: DOC: Add a release fragment for gh-14622 +* `#27816 `__: MAINT: Fixup that spin can be installed via conda too now +* `#27817 `__: DEV: changelog: make title processing more robust +* `#27828 `__: CI: skip ninja installation in linux_qemu workflows +* `#27829 `__: CI: update circleci to python3.11.10, limit parallel builds.... +* `#27831 `__: BUG: Fix mismatch in definition and declaration for a couple... +* `#27843 `__: DOC: Correct version-added for mean arg for nanvar and nanstd +* `#27845 `__: BUG: Never negate strides in reductions (for now) +* `#27846 `__: ENH: add matvec and vecmat gufuncs +* `#27852 `__: DOC: Correct versionadded for vecmat and matvec. +* `#27853 `__: REL: Prepare for the NumPy 2.2.0rc1 release [wheel build] +* `#27874 `__: BUG: fix importing numpy in Python's optimized mode (#27868) +* `#27895 `__: DOC: Fix double import in docs (#27878) +* `#27904 `__: MAINT: Ensure correct handling for very large unicode strings +* `#27906 `__: MAINT: Use mask_store instead of store for compiler workaround +* `#27908 `__: MAINT: Update highway from main. +* `#27911 `__: ENH: update __module__ in numpy.random module +* `#27912 `__: ENH: Refactor ``__qualname__`` across API +* `#27913 `__: PERF: improve multithreaded ufunc scaling +* `#27916 `__: MAINT: Bump actions/cache from 4.1.2 to 4.2.0 + diff --git a/doc/changelog/2.2.1-changelog.rst b/doc/changelog/2.2.1-changelog.rst new file mode 100644 index 000000000000..ba3c4f19eb3f --- /dev/null +++ b/doc/changelog/2.2.1-changelog.rst @@ -0,0 +1,34 @@ + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. 
+ +* Charles Harris +* Joren Hammudoglu +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Simon Altrogge +* Thomas A Caswell +* Warren Weckesser +* Yang Wang + + +Pull requests merged +==================== + +A total of 12 pull requests were merged for this release. + +* `#27935 `__: MAINT: Prepare 2.2.x for further development +* `#27950 `__: TEST: cleanups [skip cirrus][skip azp] +* `#27958 `__: BUG: fix use-after-free error in npy_hashtable.cpp (#27955) +* `#27959 `__: BLD: add missing include +* `#27982 `__: BUG:fix compile error libatomic link test to meson.build +* `#27990 `__: TYP: Fix falsely rejected value types in ``ndarray.__setitem__`` +* `#27991 `__: MAINT: Don't wrap ``#include `` with ``extern "C"`` +* `#27993 `__: BUG: Fix segfault in stringdtype lexsort +* `#28006 `__: MAINT: random: Tweak module code in mtrand.pyx to fix a Cython... +* `#28007 `__: BUG: Cython API was missing NPY_UINTP. +* `#28021 `__: CI: pin scipy-doctest to 1.5.1 +* `#28044 `__: TYP: allow ``None`` in operand sequence of nditer diff --git a/doc/changelog/2.2.2-changelog.rst b/doc/changelog/2.2.2-changelog.rst new file mode 100644 index 000000000000..ac856c97174c --- /dev/null +++ b/doc/changelog/2.2.2-changelog.rst @@ -0,0 +1,37 @@ + +Contributors +============ + +A total of 8 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Alicia Boya García + +* Charles Harris +* Joren Hammudoglu +* Kai Germaschewski + +* Nathan Goldbaum +* PTUsumit + +* Rohit Goswami +* Sebastian Berg + +Pull requests merged +==================== + +A total of 16 pull requests were merged for this release. + +* `#28050 `__: MAINT: Prepare 2.2.x for further development +* `#28055 `__: TYP: fix ``void`` arrays not accepting ``str`` keys in ``__setitem__`` +* `#28066 `__: TYP: fix unnecessarily broad ``integer`` binop return types (#28065) +* `#28112 `__: TYP: Better ``ndarray`` binop return types for ``float64`` &... 
+* `#28113 `__: TYP: Return the correct ``bool`` from ``issubdtype`` +* `#28114 `__: TYP: Always accept ``date[time]`` in the ``datetime64`` constructor +* `#28120 `__: BUG: Fix auxdata initialization in ufunc slow path +* `#28131 `__: BUG: move reduction initialization to ufunc initialization +* `#28132 `__: TYP: Fix ``interp`` to accept and return scalars +* `#28137 `__: BUG: call PyType_Ready in f2py to avoid data races +* `#28145 `__: BUG: remove unnecessary call to PyArray_UpdateFlags +* `#28160 `__: BUG: Avoid data race in PyArray_CheckFromAny_int +* `#28175 `__: BUG: Fix f2py directives and --lower casing +* `#28176 `__: TYP: Fix overlapping overloads issue in 2->1 ufuncs +* `#28177 `__: TYP: preserve shape-type in ndarray.astype() +* `#28178 `__: TYP: Fix missing and spurious top-level exports diff --git a/doc/changelog/2.2.3-changelog.rst b/doc/changelog/2.2.3-changelog.rst new file mode 100644 index 000000000000..2cb6e99eec51 --- /dev/null +++ b/doc/changelog/2.2.3-changelog.rst @@ -0,0 +1,43 @@ + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !amotzop +* Charles Harris +* Chris Sidebottom +* Joren Hammudoglu +* Matthew Brett +* Nathan Goldbaum +* Raghuveer Devulapalli +* Sebastian Berg +* Yakov Danishevsky + + +Pull requests merged +==================== + +A total of 21 pull requests were merged for this release. + +* `#28185 `__: MAINT: Prepare 2.2.x for further development +* `#28201 `__: BUG: fix data race in a more minimal way on stable branch +* `#28208 `__: BUG: Fix ``from_float_positional`` errors for huge pads +* `#28209 `__: BUG: fix data race in np.repeat +* `#28212 `__: MAINT: Use VQSORT_COMPILER_COMPATIBLE to determine if we should... 
+* `#28224 `__: MAINT: update highway to latest +* `#28236 `__: BUG: Add cpp atomic support (#28234) +* `#28237 `__: BLD: Compile fix for clang-cl on WoA +* `#28243 `__: TYP: Avoid upcasting ``float64`` in the set-ops +* `#28249 `__: BLD: better fix for clang / ARM compiles +* `#28266 `__: TYP: Fix ``timedelta64.__divmod__`` and ``timedelta64.__mod__``... +* `#28274 `__: TYP: Fixed missing typing information of set_printoptions +* `#28278 `__: BUG: backport resource cleanup bugfix from gh-28273 +* `#28282 `__: BUG: fix incorrect bytes to stringdtype coercion +* `#28283 `__: TYP: Fix scalar constructors +* `#28284 `__: TYP: stub ``numpy.matlib`` +* `#28285 `__: TYP: stub the missing ``numpy.testing`` modules +* `#28286 `__: CI: Fix the github label for ``TYP:`` PR's and issues +* `#28305 `__: TYP: Backport typing updates from main +* `#28321 `__: BUG: fix race initializing legacy dtype casts +* `#28324 `__: CI: update test_moderately_small_alpha diff --git a/doc/changelog/2.2.4-changelog.rst b/doc/changelog/2.2.4-changelog.rst new file mode 100644 index 000000000000..1e2664ebde48 --- /dev/null +++ b/doc/changelog/2.2.4-changelog.rst @@ -0,0 +1,45 @@ + +Contributors +============ + +A total of 15 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Abhishek Kumar +* Andrej Zhilenkov +* Andrew Nelson +* Charles Harris +* Giovanni Del Monte +* Guan Ming(Wesley) Chiu + +* Jonathan Albrecht + +* Joren Hammudoglu +* Mark Harfouche +* Matthieu Darbois +* Nathan Goldbaum +* Pieter Eendebak +* Sebastian Berg +* Tyler Reddy +* lvllvl + + +Pull requests merged +==================== + +A total of 17 pull requests were merged for this release. + +* `#28333 `__: MAINT: Prepare 2.2.x for further development. +* `#28348 `__: TYP: fix positional- and keyword-only params in astype, cross... 
+* `#28377 `__: MAINT: Update FreeBSD version and fix test failure +* `#28379 `__: BUG: numpy.loadtxt reads only 50000 lines when skip_rows >= max_rows +* `#28385 `__: BUG: Make np.nonzero threading safe +* `#28420 `__: BUG: safer bincount casting (backport to 2.2.x) +* `#28422 `__: BUG: Fix building on s390x with clang +* `#28423 `__: CI: use QEMU 9.2.2 for Linux Qemu tests +* `#28424 `__: BUG: skip legacy dtype multithreaded test on 32 bit runners +* `#28435 `__: BUG: Fix searchsorted and CheckFromAny byte-swapping logic +* `#28449 `__: BUG: sanity check ``__array_interface__`` number of dimensions +* `#28510 `__: MAINT: Hide decorator from pytest traceback +* `#28512 `__: TYP: Typing fixes backported from #28452, #28491, #28494 +* `#28521 `__: TYP: Backport fixes from #28505, #28506, #28508, and #28511 +* `#28533 `__: TYP: Backport typing fixes from main (2) +* `#28534 `__: TYP: Backport typing fixes from main (3) +* `#28542 `__: TYP: Backport typing fixes from main (4) diff --git a/doc/changelog/2.2.5-changelog.rst b/doc/changelog/2.2.5-changelog.rst new file mode 100644 index 000000000000..409c243d148e --- /dev/null +++ b/doc/changelog/2.2.5-changelog.rst @@ -0,0 +1,39 @@ + +Contributors +============ + +A total of 7 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Joren Hammudoglu +* Baskar Gopinath + +* Nathan Goldbaum +* Nicholas Christensen + +* Sayed Adel +* karl + + +Pull requests merged +==================== + +A total of 19 pull requests were merged for this release. 
+ +* `#28545 `__: MAINT: Prepare 2.2.x for further development +* `#28582 `__: BUG: Fix return type of NpyIter_GetIterNext in Cython declarations +* `#28583 `__: BUG: avoid deadlocks with C++ shared mutex in dispatch cache +* `#28585 `__: TYP: fix typing errors in ``_core.strings`` +* `#28631 `__: MAINT, CI: Update Ubuntu to 22.04 in azure-pipelines +* `#28632 `__: BUG: Set writeable flag for writeable dlpacks. +* `#28633 `__: BUG: Fix crackfortran parsing error when a division occurs within... +* `#28650 `__: TYP: fix ``ndarray.tolist()`` and ``.item()`` for unknown dtype +* `#28654 `__: BUG: fix deepcopying StringDType arrays (#28643) +* `#28661 `__: TYP: Accept objects that ``write()`` to ``str`` in ``savetxt`` +* `#28663 `__: CI: Replace QEMU armhf with native (32-bit compatibility mode) +* `#28682 `__: SIMD: Resolve Highway QSort symbol linking error on aarch32/ASIMD +* `#28683 `__: TYP: add missing ``"b1"`` literals for ``dtype[bool]`` +* `#28705 `__: TYP: Fix false rejection of ``NDArray[object_].__abs__()`` +* `#28706 `__: TYP: Fix inconsistent ``NDArray[float64].__[r]truediv__`` return... +* `#28723 `__: TYP: fix string-like ``ndarray`` rich comparison operators +* `#28758 `__: TYP: some ``[arg]partition`` fixes +* `#28772 `__: TYP: fix incorrect ``random.Generator.integers`` return type +* `#28774 `__: TYP: fix ``count_nonzero`` signature diff --git a/doc/changelog/2.2.6-changelog.rst b/doc/changelog/2.2.6-changelog.rst new file mode 100644 index 000000000000..16c62da4a927 --- /dev/null +++ b/doc/changelog/2.2.6-changelog.rst @@ -0,0 +1,32 @@ + +Contributors +============ + +A total of 8 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Ilhan Polat +* Joren Hammudoglu +* Marco Gorelli + +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Sayed Adel + +Pull requests merged +==================== + +A total of 11 pull requests were merged for this release. 
+ +* `#28778 `__: MAINT: Prepare 2.2.x for further development +* `#28851 `__: BLD: Update vendor-meson to fix module_feature conflicts arguments... +* `#28852 `__: BUG: fix heap buffer overflow in np.strings.find +* `#28853 `__: TYP: fix ``NDArray[floating] + float`` return type +* `#28864 `__: BUG: fix stringdtype singleton thread safety +* `#28865 `__: MAINT: use OpenBLAS 0.3.29 +* `#28889 `__: MAINT: from_dlpack thread safety fixes (#28883) +* `#28913 `__: TYP: Fix non-existent ``CanIndex`` annotation in ``ndarray.setfield`` +* `#28915 `__: MAINT: Avoid dereferencing/strict aliasing warnings +* `#28916 `__: BUG: Fix missing check for PyErr_Occurred() in _pyarray_correlate. +* `#28966 `__: TYP: reject complex scalar types in ndarray.__ifloordiv__ diff --git a/doc/release/upcoming_changes/14622.improvement.rst b/doc/release/upcoming_changes/14622.improvement.rst deleted file mode 100644 index 3a3cd01f305d..000000000000 --- a/doc/release/upcoming_changes/14622.improvement.rst +++ /dev/null @@ -1,4 +0,0 @@ -* The ``datetime64`` and ``timedelta64`` hashes now - correctly match the Pythons builtin ``datetime`` and - ``timedelta`` ones. The hashes now evaluated equal - even for equal values with different time units. diff --git a/doc/release/upcoming_changes/26766.change.rst b/doc/release/upcoming_changes/26766.change.rst deleted file mode 100644 index f9223a1d1114..000000000000 --- a/doc/release/upcoming_changes/26766.change.rst +++ /dev/null @@ -1,2 +0,0 @@ -* `numpy.fix` now won't perform casting to a floating data-type for integer - and boolean data-type input arrays. diff --git a/doc/release/upcoming_changes/27088.change.rst b/doc/release/upcoming_changes/27088.change.rst deleted file mode 100644 index c9057ba53ea0..000000000000 --- a/doc/release/upcoming_changes/27088.change.rst +++ /dev/null @@ -1,2 +0,0 @@ -Bump the musllinux CI image and wheels to 1_2 from 1_1. This is because 1_1 is -`end of life `_. 
diff --git a/doc/release/upcoming_changes/27119.performance.rst b/doc/release/upcoming_changes/27119.performance.rst deleted file mode 100644 index abf7b58e4e8a..000000000000 --- a/doc/release/upcoming_changes/27119.performance.rst +++ /dev/null @@ -1,4 +0,0 @@ -* NumPy now uses fast-on-failure attribute lookups for protocols. - This can greatly reduce overheads of function calls or array creation - especially with custom Python objects. The largest improvements - will be seen on Python 3.12 or newer. diff --git a/doc/release/upcoming_changes/27147.performance.rst b/doc/release/upcoming_changes/27147.performance.rst deleted file mode 100644 index f2ec14212ef1..000000000000 --- a/doc/release/upcoming_changes/27147.performance.rst +++ /dev/null @@ -1,8 +0,0 @@ -* OpenBLAS on x86_64 and i686 is built with fewer kernels. Based on - benchmarking, there are 5 clusters of performance around these kernels: - ``PRESCOTT NEHALEM SANDYBRIDGE HASWELL SKYLAKEX``. - -* OpenBLAS on windows is linked without quadmath, simplifying licensing - -* Due to a regression in OpenBLAS on windows, the performance improvements - when using multiple threads for OpenBLAS 0.3.26 were reverted. diff --git a/doc/release/upcoming_changes/27156.change.rst b/doc/release/upcoming_changes/27156.change.rst deleted file mode 100644 index 5902b76d4332..000000000000 --- a/doc/release/upcoming_changes/27156.change.rst +++ /dev/null @@ -1,9 +0,0 @@ -NEP 50 promotion state option removed -------------------------------------- -The NEP 50 promotion state settings are now removed. They were always -meant as temporary means for testing. -A warning will be given if the environment variable is set to anything -but ``NPY_PROMOTION_STATE=weak`` while ``_set_promotion_state`` -and ``_get_promotion_state`` are removed. -In case code used ``_no_nep50_warning``, a ``contextlib.nullcontext`` -could be used to replace it when not available. 
diff --git a/doc/release/upcoming_changes/27160.expired.rst b/doc/release/upcoming_changes/27160.expired.rst deleted file mode 100644 index 9334aed2bad6..000000000000 --- a/doc/release/upcoming_changes/27160.expired.rst +++ /dev/null @@ -1,2 +0,0 @@ -* ``bool(np.array([]))`` and other empty arrays will now raise an error. - Use ``arr.size > 0`` instead to check whether an array has no elements. diff --git a/doc/release/upcoming_changes/27334.change.rst b/doc/release/upcoming_changes/27334.change.rst deleted file mode 100644 index e8d98ced1776..000000000000 --- a/doc/release/upcoming_changes/27334.change.rst +++ /dev/null @@ -1,9 +0,0 @@ -* The type annotations of ``numpy.float64`` and ``numpy.complex128`` now - reflect that they are also subtypes of the built-in ``float`` and ``complex`` - types, respectively. This update prevents static type-checkers from reporting - errors in cases such as: - - .. code-block:: python - - x: float = numpy.float64(6.28) # valid - z: complex = numpy.complex128(-1j) # valid diff --git a/doc/release/upcoming_changes/27420.new_feature.rst b/doc/release/upcoming_changes/27420.new_feature.rst deleted file mode 100644 index 7f6e223cda62..000000000000 --- a/doc/release/upcoming_changes/27420.new_feature.rst +++ /dev/null @@ -1,2 +0,0 @@ -* ``np.complexfloating[T, T]`` can now also be written as - ``np.complexfloating[T]`` diff --git a/doc/release/upcoming_changes/27482.change.rst b/doc/release/upcoming_changes/27482.change.rst deleted file mode 100644 index 3c974077e0d0..000000000000 --- a/doc/release/upcoming_changes/27482.change.rst +++ /dev/null @@ -1,8 +0,0 @@ -* The ``repr`` of arrays large enough to be summarized (i.e., where elements - are replaced with ``...``) now includes the ``shape`` of the array, similar - to what already was the case for arrays with zero size and non-obvious - shape. With this change, the shape is always given when it cannot be - inferred from the values. 
Note that while written as ``shape=...``, this - argument cannot actually be passed in to the ``np.array`` constructor. If - you encounter problems, e.g., due to failing doctests, you can use the print - option ``legacy=2.1`` to get the old behaviour. diff --git a/doc/release/upcoming_changes/27636.improvement.rst b/doc/release/upcoming_changes/27636.improvement.rst deleted file mode 100644 index 53c202b31197..000000000000 --- a/doc/release/upcoming_changes/27636.improvement.rst +++ /dev/null @@ -1,3 +0,0 @@ -* Fixed a number of issues around promotion for string ufuncs with StringDType - arguments. Mixing StringDType and the fixed-width DTypes using the string - ufuncs should now generate much more uniform results. diff --git a/doc/release/upcoming_changes/27661.compatibility.rst b/doc/release/upcoming_changes/27661.compatibility.rst deleted file mode 100644 index 0482f876766c..000000000000 --- a/doc/release/upcoming_changes/27661.compatibility.rst +++ /dev/null @@ -1,5 +0,0 @@ -* `numpy.cov` now properly transposes single-row (2d array) design matrices - when ``rowvar=False``. Previously, single-row design matrices would - return a scalar in this scenario, which is not correct, so this - is a behavior change and an array of the appropriate shape will - now be returned. diff --git a/doc/release/upcoming_changes/27695.improvement.rst b/doc/release/upcoming_changes/27695.improvement.rst deleted file mode 100644 index 95584b6e90ce..000000000000 --- a/doc/release/upcoming_changes/27695.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -``f2py`` handles multiple modules and exposes variables again -------------------------------------------------------------- -A regression has been fixed which allows F2PY users to expose variables to -Python in modules with only assignments, and also fixes situations where -multiple modules are present within a single source file. 
diff --git a/doc/release/upcoming_changes/27723.improvement.rst b/doc/release/upcoming_changes/27723.improvement.rst deleted file mode 100644 index bffc9d5a17de..000000000000 --- a/doc/release/upcoming_changes/27723.improvement.rst +++ /dev/null @@ -1,4 +0,0 @@ -* Improved support for empty `memmap`. Previously an empty `memmap` would fail - unless a non-zero ``offset`` was set. Now a zero-size `memmap` is supported - even if ``offset=0``. To achieve this, if a `memmap` is mapped to an empty - file that file is padded with a single byte. diff --git a/doc/release/upcoming_changes/27735.deprecation.rst b/doc/release/upcoming_changes/27735.deprecation.rst deleted file mode 100644 index 897a3871264b..000000000000 --- a/doc/release/upcoming_changes/27735.deprecation.rst +++ /dev/null @@ -1,2 +0,0 @@ -* ``_add_newdoc_ufunc`` is now deprecated. ``ufunc.__doc__ = newdoc`` should - be used instead. diff --git a/doc/release/upcoming_changes/27735.new_feature.rst b/doc/release/upcoming_changes/27735.new_feature.rst deleted file mode 100644 index 4d216218399d..000000000000 --- a/doc/release/upcoming_changes/27735.new_feature.rst +++ /dev/null @@ -1,4 +0,0 @@ -* UFuncs now support ``__dict__`` attribute and allow overriding ``__doc__`` - (either directly or via ``ufunc.__dict__["__doc__"]``). ``__dict__`` can be - used to also override other properties, such as ``__module__`` or - ``__qualname__``. diff --git a/doc/release/upcoming_changes/27736.new_feature.rst b/doc/release/upcoming_changes/27736.new_feature.rst deleted file mode 100644 index 01422db19726..000000000000 --- a/doc/release/upcoming_changes/27736.new_feature.rst +++ /dev/null @@ -1,3 +0,0 @@ -* The "nbit" type parameter of ``np.number`` and its subtypes now defaults - to ``typing.Any``. This way, type-checkers will infer annotations such as - ``x: np.floating`` as ``x: np.floating[Any]``, even in strict mode. 
diff --git a/doc/release/upcoming_changes/27807.change.rst b/doc/release/upcoming_changes/27807.change.rst deleted file mode 100644 index 995c1770e224..000000000000 --- a/doc/release/upcoming_changes/27807.change.rst +++ /dev/null @@ -1,4 +0,0 @@ -* Calling ``__array_wrap__`` directly on NumPy arrays or scalars - now does the right thing when ``return_scalar`` is passed - (Added in NumPy 2). It is further safe now to call the scalar - ``__array_wrap__`` on a non-scalar result. diff --git a/doc/release/upcoming_changes/27808.performance.rst b/doc/release/upcoming_changes/27808.performance.rst deleted file mode 100644 index e3d5648d3d38..000000000000 --- a/doc/release/upcoming_changes/27808.performance.rst +++ /dev/null @@ -1,2 +0,0 @@ -* NumPy now indicates hugepages also for large ``np.zeros`` allocations - on linux. Thus should generally improve performance. diff --git a/doc/source/reference/random/index.rst b/doc/source/reference/random/index.rst index 976a03a9a449..77d39d0e771f 100644 --- a/doc/source/reference/random/index.rst +++ b/doc/source/reference/random/index.rst @@ -65,7 +65,6 @@ arbitrary 128-bit integer. >>> import numpy as np >>> import secrets - >>> import numpy as np >>> secrets.randbits(128) #doctest: +SKIP 122807528840384100672342137672332424406 # may vary >>> rng1 = np.random.default_rng(122807528840384100672342137672332424406) diff --git a/doc/source/reference/routines.linalg.rst b/doc/source/reference/routines.linalg.rst index 49c1ea7bce7a..d4fd7f9e0677 100644 --- a/doc/source/reference/routines.linalg.rst +++ b/doc/source/reference/routines.linalg.rst @@ -62,6 +62,8 @@ Matrix and vector products outer matmul linalg.matmul (Array API compatible location) + matvec + vecmat tensordot linalg.tensordot (Array API compatible location) einsum diff --git a/doc/source/release.rst b/doc/source/release.rst index fd0702f4ae17..6cef4da82790 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,13 @@ Release notes .. 
toctree:: :maxdepth: 2 + 2.2.7 + 2.2.6 + 2.2.5 + 2.2.4 + 2.2.3 + 2.2.2 + 2.2.1 2.2.0 2.1.3 2.1.2 diff --git a/doc/source/release/2.2.0-notes.rst b/doc/source/release/2.2.0-notes.rst index 125653352572..41b3d2b58004 100644 --- a/doc/source/release/2.2.0-notes.rst +++ b/doc/source/release/2.2.0-notes.rst @@ -4,16 +4,207 @@ NumPy 2.2.0 Release Notes ========================== +The NumPy 2.2.0 release is a quick release that brings us back into sync with the +usual twice yearly release cycle. There have been a number of small cleanups, +as well as work bringing the new StringDType to completion and improving support +for free threaded Python. Highlights are: -Highlights -========== +* New functions ``matvec`` and ``vecmat``, see below. +* Many improved annotations. +* Improved support for the new StringDType. +* Improved support for free threaded Python +* Fixes for f2py -*We'll choose highlights for this release near the end of the release cycle.* +This release supports Python versions 3.10-3.13. -.. if release snippets have been incorporated already, uncomment the follow - line (leave the `.. include:: directive) +Deprecations +============ -* ``_add_newdoc_ufunc`` is now deprecated. ``ufunc.__doc__ = newdoc`` should - be used instead. + + (`gh-27735 `__) + + +Expired deprecations +==================== + +* ``bool(np.array([]))`` and other empty arrays will now raise an error. + Use ``arr.size > 0`` instead to check whether an array has no elements. + + (`gh-27160 `__) + + +Compatibility notes +=================== + +* `numpy.cov` now properly transposes single-row (2d array) design matrices + when ``rowvar=False``. Previously, single-row design matrices would return a + scalar in this scenario, which is not correct, so this is a behavior change + and an array of the appropriate shape will now be returned.
+ + (`gh-27661 `__) + + +New Features +============ + +* New functions for matrix-vector and vector-matrix products + + Two new generalized ufuncs were defined: + + * `numpy.matvec` - matrix-vector product, treating the arguments as + stacks of matrices and column vectors, respectively. + + * `numpy.vecmat` - vector-matrix product, treating the arguments as + stacks of column vectors and matrices, respectively. For complex + vectors, the conjugate is taken. + + These add to the existing `numpy.matmul` as well as to `numpy.vecdot`, + which was added in numpy 2.0. + + Note that `numpy.matmul` never takes a complex conjugate, also not + when its left input is a vector, while both `numpy.vecdot` and + `numpy.vecmat` do take the conjugate for complex vectors on the + left-hand side (which are taken to be the ones that are transposed, + following the physics convention). + + (`gh-25675 `__) + +* ``np.complexfloating[T, T]`` can now also be written as + ``np.complexfloating[T]`` + + (`gh-27420 `__) + +* UFuncs now support ``__dict__`` attribute and allow overriding ``__doc__`` + (either directly or via ``ufunc.__dict__["__doc__"]``). ``__dict__`` can be + used to also override other properties, such as ``__module__`` or + ``__qualname__``. + + (`gh-27735 `__) + +* The "nbit" type parameter of ``np.number`` and its subtypes now defaults + to ``typing.Any``. This way, type-checkers will infer annotations such as + ``x: np.floating`` as ``x: np.floating[Any]``, even in strict mode. + + (`gh-27736 `__) + + +Improvements +============ + +* The ``datetime64`` and ``timedelta64`` hashes now correctly match the Python's + builtin ``datetime`` and ``timedelta`` ones. The hashes now evaluate equal + even for equal values with different time units. + + (`gh-14622 `__) + +* Fixed a number of issues around promotion for string ufuncs with StringDType + arguments. Mixing StringDType and the fixed-width DTypes using the string + ufuncs should now generate much more uniform results. 
+ + (`gh-27636 `__) + +* Improved support for empty `memmap`. Previously an empty `memmap` would fail + unless a non-zero ``offset`` was set. Now a zero-size `memmap` is supported + even if ``offset=0``. To achieve this, if a `memmap` is mapped to an empty + file that file is padded with a single byte. + + (`gh-27723 `__) + +``f2py`` handles multiple modules and exposes variables again +------------------------------------------------------------- +A regression has been fixed which allows F2PY users to expose variables to +Python in modules with only assignments, and also fixes situations where +multiple modules are present within a single source file. + +(`gh-27695 `__) + + +Performance improvements and changes +==================================== + +* Improved multithreaded scaling on the free-threaded build when many threads + simultaneously call the same ufunc operations. + + (`gh-27896 `__) + +* NumPy now uses fast-on-failure attribute lookups for protocols. This can + greatly reduce overheads of function calls or array creation especially with + custom Python objects. The largest improvements will be seen on Python 3.12 + or newer. + + (`gh-27119 `__) + +* OpenBLAS on x86_64 and i686 is built with fewer kernels. Based on + benchmarking, there are 5 clusters of performance around these kernels: + ``PRESCOTT NEHALEM SANDYBRIDGE HASWELL SKYLAKEX``. + +* OpenBLAS on windows is linked without quadmath, simplifying licensing + +* Due to a regression in OpenBLAS on windows, the performance improvements + when using multiple threads for OpenBLAS 0.3.26 were reverted. + + (`gh-27147 `__) + +* NumPy now indicates hugepages also for large ``np.zeros`` allocations + on linux. This should generally improve performance. + + (`gh-27808 `__) + + +Changes +======= + +* `numpy.fix` now won't perform casting to a floating data-type for integer + and boolean data-type input arrays. 
+ + (`gh-26766 `__) + +* The type annotations of ``numpy.float64`` and ``numpy.complex128`` now + reflect that they are also subtypes of the built-in ``float`` and ``complex`` + types, respectively. This update prevents static type-checkers from reporting + errors in cases such as: + + .. code-block:: python + + x: float = numpy.float64(6.28) # valid + z: complex = numpy.complex128(-1j) # valid + + (`gh-27334 `__) + +* The ``repr`` of arrays large enough to be summarized (i.e., where elements + are replaced with ``...``) now includes the ``shape`` of the array, similar + to what already was the case for arrays with zero size and non-obvious + shape. With this change, the shape is always given when it cannot be + inferred from the values. Note that while written as ``shape=...``, this + argument cannot actually be passed in to the ``np.array`` constructor. If + you encounter problems, e.g., due to failing doctests, you can use the print + option ``legacy=2.1`` to get the old behaviour. + + (`gh-27482 `__) + +* Calling ``__array_wrap__`` directly on NumPy arrays or scalars now does the + right thing when ``return_scalar`` is passed (Added in NumPy 2). It is + further safe now to call the scalar ``__array_wrap__`` on a non-scalar + result. + + (`gh-27807 `__) + +Bump the musllinux CI image and wheels to 1_2 from 1_1. This is because 1_1 is +`end of life `_. + +(`gh-27088 `__) + +NEP 50 promotion state option removed +------------------------------------- +The NEP 50 promotion state settings are now removed. They were always meant as +temporary means for testing. A warning will be given if the environment +variable is set to anything but ``NPY_PROMOTION_STATE=weak`` while +``_set_promotion_state`` and ``_get_promotion_state`` are removed. In case +code used ``_no_nep50_warning``, a ``contextlib.nullcontext`` could be used to +replace it when not available. + +(`gh-27156 `__) -.. 
include:: notes-towncrier.rst diff --git a/doc/source/release/2.2.1-notes.rst b/doc/source/release/2.2.1-notes.rst new file mode 100644 index 000000000000..fe60fa0268f3 --- /dev/null +++ b/doc/source/release/2.2.1-notes.rst @@ -0,0 +1,54 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.2.1 Release Notes +========================== + +NumPy 2.2.1 is a patch release following 2.2.0. It fixes bugs found after the +2.2.0 release and has several maintenance pins to work around upstream changes. + +There was some breakage in downstream projects following the 2.2.0 release due +to updates to NumPy typing. Because of problems due to MyPy defects, we +recommend using basedpyright for type checking, it can be installed from +PyPI. The Pylance extension for Visual Studio Code is also based on Pyright. +Problems that persist when using basedpyright should be reported as issues +on the NumPy github site. + +This release supports Python 3.10-3.13. + + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Joren Hammudoglu +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Simon Altrogge +* Thomas A Caswell +* Warren Weckesser +* Yang Wang + + + +Pull requests merged +==================== + +A total of 12 pull requests were merged for this release. + +* `#27935 `__: MAINT: Prepare 2.2.x for further development +* `#27950 `__: TEST: cleanups [skip cirrus][skip azp] +* `#27958 `__: BUG: fix use-after-free error in npy_hashtable.cpp (#27955) +* `#27959 `__: BLD: add missing include +* `#27982 `__: BUG:fix compile error libatomic link test to meson.build +* `#27990 `__: TYP: Fix falsely rejected value types in ``ndarray.__setitem__`` +* `#27991 `__: MAINT: Don't wrap ``#include `` with ``extern "C"`` +* `#27993 `__: BUG: Fix segfault in stringdtype lexsort +* `#28006 `__: MAINT: random: Tweak module code in mtrand.pyx to fix a Cython... 
+* `#28007 `__: BUG: Cython API was missing NPY_UINTP. +* `#28021 `__: CI: pin scipy-doctest to 1.5.1 +* `#28044 `__: TYP: allow ``None`` in operand sequence of nditer + diff --git a/doc/source/release/2.2.2-notes.rst b/doc/source/release/2.2.2-notes.rst new file mode 100644 index 000000000000..8a3de547ec81 --- /dev/null +++ b/doc/source/release/2.2.2-notes.rst @@ -0,0 +1,49 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.2.2 Release Notes +========================== + +NumPy 2.2.2 is a patch release that fixes bugs found after the 2.2.1 release. +The number of typing fixes/updates is notable. This release supports Python +versions 3.10-3.13. + + +Contributors +============ + +A total of 8 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Alicia Boya García + +* Charles Harris +* Joren Hammudoglu +* Kai Germaschewski + +* Nathan Goldbaum +* PTUsumit + +* Rohit Goswami +* Sebastian Berg + + +Pull requests merged +==================== + +A total of 16 pull requests were merged for this release. + +* `#28050 `__: MAINT: Prepare 2.2.x for further development +* `#28055 `__: TYP: fix ``void`` arrays not accepting ``str`` keys in ``__setitem__`` +* `#28066 `__: TYP: fix unnecessarily broad ``integer`` binop return types (#28065) +* `#28112 `__: TYP: Better ``ndarray`` binop return types for ``float64`` &... 
+* `#28113 `__: TYP: Return the correct ``bool`` from ``issubdtype`` +* `#28114 `__: TYP: Always accept ``date[time]`` in the ``datetime64`` constructor +* `#28120 `__: BUG: Fix auxdata initialization in ufunc slow path +* `#28131 `__: BUG: move reduction initialization to ufunc initialization +* `#28132 `__: TYP: Fix ``interp`` to accept and return scalars +* `#28137 `__: BUG: call PyType_Ready in f2py to avoid data races +* `#28145 `__: BUG: remove unnecessary call to PyArray_UpdateFlags +* `#28160 `__: BUG: Avoid data race in PyArray_CheckFromAny_int +* `#28175 `__: BUG: Fix f2py directives and --lower casing +* `#28176 `__: TYP: Fix overlapping overloads issue in 2->1 ufuncs +* `#28177 `__: TYP: preserve shape-type in ndarray.astype() +* `#28178 `__: TYP: Fix missing and spurious top-level exports + diff --git a/doc/source/release/2.2.3-notes.rst b/doc/source/release/2.2.3-notes.rst new file mode 100644 index 000000000000..cf21d751ec00 --- /dev/null +++ b/doc/source/release/2.2.3-notes.rst @@ -0,0 +1,56 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.2.3 Release Notes +========================== + +NumPy 2.2.3 is a patch release that fixes bugs found after the 2.2.2 release. +The majority of the changes are typing improvements and fixes for free +threaded Python. Both of those areas are still under development, so if you +discover new problems, please report them. + +This release supports Python versions 3.10-3.13. + + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !amotzop +* Charles Harris +* Chris Sidebottom +* Joren Hammudoglu +* Matthew Brett +* Nathan Goldbaum +* Raghuveer Devulapalli +* Sebastian Berg +* Yakov Danishevsky + + +Pull requests merged +==================== + +A total of 21 pull requests were merged for this release. 
+ +* `#28185 `__: MAINT: Prepare 2.2.x for further development +* `#28201 `__: BUG: fix data race in a more minimal way on stable branch +* `#28208 `__: BUG: Fix ``from_float_positional`` errors for huge pads +* `#28209 `__: BUG: fix data race in np.repeat +* `#28212 `__: MAINT: Use VQSORT_COMPILER_COMPATIBLE to determine if we should... +* `#28224 `__: MAINT: update highway to latest +* `#28236 `__: BUG: Add cpp atomic support (#28234) +* `#28237 `__: BLD: Compile fix for clang-cl on WoA +* `#28243 `__: TYP: Avoid upcasting ``float64`` in the set-ops +* `#28249 `__: BLD: better fix for clang / ARM compiles +* `#28266 `__: TYP: Fix ``timedelta64.__divmod__`` and ``timedelta64.__mod__``... +* `#28274 `__: TYP: Fixed missing typing information of set_printoptions +* `#28278 `__: BUG: backport resource cleanup bugfix from gh-28273 +* `#28282 `__: BUG: fix incorrect bytes to stringdtype coercion +* `#28283 `__: TYP: Fix scalar constructors +* `#28284 `__: TYP: stub ``numpy.matlib`` +* `#28285 `__: TYP: stub the missing ``numpy.testing`` modules +* `#28286 `__: CI: Fix the github label for ``TYP:`` PR's and issues +* `#28305 `__: TYP: Backport typing updates from main +* `#28321 `__: BUG: fix race initializing legacy dtype casts +* `#28324 `__: CI: update test_moderately_small_alpha diff --git a/doc/source/release/2.2.4-notes.rst b/doc/source/release/2.2.4-notes.rst new file mode 100644 index 000000000000..8542c98a8af9 --- /dev/null +++ b/doc/source/release/2.2.4-notes.rst @@ -0,0 +1,58 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.2.4 Release Notes +========================== + +NumPy 2.2.4 is a patch release that fixes bugs found after the 2.2.3 release. +There are a large number of typing improvements, the rest of the changes are +the usual mix of bugfixes and platform maintenance. + +This release supports Python versions 3.10-3.13. + + +Contributors +============ + +A total of 15 people contributed to this release. 
People with a "+" by their +names contributed a patch for the first time. + +* Abhishek Kumar +* Andrej Zhilenkov +* Andrew Nelson +* Charles Harris +* Giovanni Del Monte +* Guan Ming(Wesley) Chiu + +* Jonathan Albrecht + +* Joren Hammudoglu +* Mark Harfouche +* Matthieu Darbois +* Nathan Goldbaum +* Pieter Eendebak +* Sebastian Berg +* Tyler Reddy +* lvllvl + + + +Pull requests merged +==================== + +A total of 17 pull requests were merged for this release. + +* `#28333 `__: MAINT: Prepare 2.2.x for further development. +* `#28348 `__: TYP: fix positional- and keyword-only params in astype, cross... +* `#28377 `__: MAINT: Update FreeBSD version and fix test failure +* `#28379 `__: BUG: numpy.loadtxt reads only 50000 lines when skip_rows >= max_rows +* `#28385 `__: BUG: Make np.nonzero threading safe +* `#28420 `__: BUG: safer bincount casting (backport to 2.2.x) +* `#28422 `__: BUG: Fix building on s390x with clang +* `#28423 `__: CI: use QEMU 9.2.2 for Linux Qemu tests +* `#28424 `__: BUG: skip legacy dtype multithreaded test on 32 bit runners +* `#28435 `__: BUG: Fix searchsorted and CheckFromAny byte-swapping logic +* `#28449 `__: BUG: sanity check ``__array_interface__`` number of dimensions +* `#28510 `__: MAINT: Hide decorator from pytest traceback +* `#28512 `__: TYP: Typing fixes backported from #28452, #28491, #28494 +* `#28521 `__: TYP: Backport fixes from #28505, #28506, #28508, and #28511 +* `#28533 `__: TYP: Backport typing fixes from main (2) +* `#28534 `__: TYP: Backport typing fixes from main (3) +* `#28542 `__: TYP: Backport typing fixes from main (4) diff --git a/doc/source/release/2.2.5-notes.rst b/doc/source/release/2.2.5-notes.rst new file mode 100644 index 000000000000..e1c3205b006d --- /dev/null +++ b/doc/source/release/2.2.5-notes.rst @@ -0,0 +1,53 @@ +.. 
currentmodule:: numpy + +========================== +NumPy 2.2.5 Release Notes +========================== + +NumPy 2.2.5 is a patch release that fixes bugs found after the 2.2.4 release. +It has a large number of typing fixes/improvements as well as the normal bug +fixes and some CI maintenance. + +This release supports Python versions 3.10-3.13. + + +Contributors +============ + +A total of 7 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Joren Hammudoglu +* Baskar Gopinath + +* Nathan Goldbaum +* Nicholas Christensen + +* Sayed Adel +* karl + + + +Pull requests merged +==================== + +A total of 19 pull requests were merged for this release. + +* `#28545 `__: MAINT: Prepare 2.2.x for further development +* `#28582 `__: BUG: Fix return type of NpyIter_GetIterNext in Cython declarations +* `#28583 `__: BUG: avoid deadlocks with C++ shared mutex in dispatch cache +* `#28585 `__: TYP: fix typing errors in ``_core.strings`` +* `#28631 `__: MAINT, CI: Update Ubuntu to 22.04 in azure-pipelines +* `#28632 `__: BUG: Set writeable flag for writeable dlpacks. +* `#28633 `__: BUG: Fix crackfortran parsing error when a division occurs within... +* `#28650 `__: TYP: fix ``ndarray.tolist()`` and ``.item()`` for unknown dtype +* `#28654 `__: BUG: fix deepcopying StringDType arrays (#28643) +* `#28661 `__: TYP: Accept objects that ``write()`` to ``str`` in ``savetxt`` +* `#28663 `__: CI: Replace QEMU armhf with native (32-bit compatibility mode) +* `#28682 `__: SIMD: Resolve Highway QSort symbol linking error on aarch32/ASIMD +* `#28683 `__: TYP: add missing ``"b1"`` literals for ``dtype[bool]`` +* `#28705 `__: TYP: Fix false rejection of ``NDArray[object_].__abs__()`` +* `#28706 `__: TYP: Fix inconsistent ``NDArray[float64].__[r]truediv__`` return... 
+* `#28723 `__: TYP: fix string-like ``ndarray`` rich comparison operators +* `#28758 `__: TYP: some ``[arg]partition`` fixes +* `#28772 `__: TYP: fix incorrect ``random.Generator.integers`` return type +* `#28774 `__: TYP: fix ``count_nonzero`` signature + diff --git a/doc/source/release/2.2.6-notes.rst b/doc/source/release/2.2.6-notes.rst new file mode 100644 index 000000000000..974f59d640db --- /dev/null +++ b/doc/source/release/2.2.6-notes.rst @@ -0,0 +1,43 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.2.6 Release Notes +========================== + +NumPy 2.2.6 is a patch release that fixes bugs found after the 2.2.5 release. +It is a mix of typing fixes/improvements as well as the normal bug +fixes and some CI maintenance. + +This release supports Python versions 3.10-3.13. + +Contributors +============ + +A total of 8 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Ilhan Polat +* Joren Hammudoglu +* Marco Gorelli + +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Sayed Adel + +Pull requests merged +==================== + +A total of 11 pull requests were merged for this release. + +* `#28778 `__: MAINT: Prepare 2.2.x for further development +* `#28851 `__: BLD: Update vendor-meson to fix module_feature conflicts arguments... +* `#28852 `__: BUG: fix heap buffer overflow in np.strings.find +* `#28853 `__: TYP: fix ``NDArray[floating] + float`` return type +* `#28864 `__: BUG: fix stringdtype singleton thread safety +* `#28865 `__: MAINT: use OpenBLAS 0.3.29 +* `#28889 `__: MAINT: from_dlpack thread safety fixes +* `#28913 `__: TYP: Fix non-existent ``CanIndex`` annotation in ``ndarray.setfield`` +* `#28915 `__: MAINT: Avoid dereferencing/strict aliasing warnings +* `#28916 `__: BUG: Fix missing check for PyErr_Occurred() in _pyarray_correlate. 
+* `#28966 `__: TYP: reject complex scalar types in ndarray.__ifloordiv__ diff --git a/doc/source/release/2.2.7-notes.rst b/doc/source/release/2.2.7-notes.rst new file mode 100644 index 000000000000..d312e4409353 --- /dev/null +++ b/doc/source/release/2.2.7-notes.rst @@ -0,0 +1,25 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.2.7 Release Notes +========================== + +NumPy 2.2.7 is a patch release that fixes bugs found after the 2.2.6 release. +It is a mix of typing fixes/improvements as well as the normal bug +fixes and some CI maintenance. + +This release supports Python versions 3.10-3.13. + + +Highlights +========== + +*We'll choose highlights for this release near the end of the release cycle.* + + +.. if release snippets have been incorporated already, uncomment the follow + line (leave the `.. include:: directive) + +.. **Content from release note snippets in doc/release/upcoming_changes:** + +.. include:: notes-towncrier.rst diff --git a/environment.yml b/environment.yml index ff9fd9e84c20..46655d750d0d 100644 --- a/environment.yml +++ b/environment.yml @@ -25,7 +25,7 @@ dependencies: - hypothesis # For type annotations - typing_extensions>=4.2.0 # needed for python < 3.10 - - mypy=1.13.0 + - mypy=1.14.1 - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 diff --git a/numpy/__config__.py.in b/numpy/__config__.py.in index 0040847708cc..a62f531c3769 100644 --- a/numpy/__config__.py.in +++ b/numpy/__config__.py.in @@ -7,7 +7,7 @@ from numpy._core._multiarray_umath import ( __cpu_dispatch__, ) -__all__ = ["show"] +__all__ = ["show_config"] _built_with_meson = True @@ -161,4 +161,10 @@ def show(mode=DisplayModes.stdout.value): f"Invalid `mode`, use one of: {', '.join([e.value for e in DisplayModes])}" ) -show.__module__ = "numpy" + +def show_config(mode=DisplayModes.stdout.value): + return show(mode) + + +show_config.__doc__ = show.__doc__ +show_config.__module__ = "numpy" diff --git a/numpy/__config__.pyi 
b/numpy/__config__.pyi index bfb13bae1cda..bd01228a1cc8 100644 --- a/numpy/__config__.pyi +++ b/numpy/__config__.pyi @@ -81,7 +81,7 @@ _ConfigDict = TypedDict( ### -__all__ = ["show"] +__all__ = ["show_config"] CONFIG: Final[_ConfigDict] = ... @@ -95,3 +95,8 @@ def _check_pyyaml() -> ModuleType: ... def show(mode: L["stdout"] = "stdout") -> None: ... @overload def show(mode: L["dicts"]) -> _ConfigDict: ... + +@overload +def show_config(mode: L["stdout"] = "stdout") -> None: ... +@overload +def show_config(mode: L["dicts"]) -> _ConfigDict: ... diff --git a/numpy/__init__.cython-30.pxd b/numpy/__init__.cython-30.pxd index 9fbdbc59d782..0728aad4829f 100644 --- a/numpy/__init__.cython-30.pxd +++ b/numpy/__init__.cython-30.pxd @@ -151,6 +151,7 @@ cdef extern from "numpy/arrayobject.h": NPY_COMPLEX512 NPY_INTP + NPY_UINTP NPY_DEFAULT_INT # Not a compile time constant (normally)! ctypedef enum NPY_ORDER: @@ -857,6 +858,14 @@ cdef extern from "numpy/ndarraytypes.h": int64_t year int32_t month, day, hour, min, sec, us, ps, as + # Iterator API added in v1.6 + # + # These don't match the definition in the C API because Cython can't wrap + # function pointers that return functions. 
+ # https://github.com/cython/cython/issues/6720 + ctypedef int (*NpyIter_IterNextFunc "NpyIter_IterNextFunc *")(NpyIter* it) noexcept nogil + ctypedef void (*NpyIter_GetMultiIndexFunc "NpyIter_GetMultiIndexFunc *")(NpyIter* it, npy_intp* outcoords) noexcept nogil + cdef extern from "numpy/arrayscalars.h": @@ -1108,10 +1117,6 @@ cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) noexcept nogil: return (obj).obmeta.base -# Iterator API added in v1.6 -ctypedef int (*NpyIter_IterNextFunc)(NpyIter* it) noexcept nogil -ctypedef void (*NpyIter_GetMultiIndexFunc)(NpyIter* it, npy_intp* outcoords) noexcept nogil - cdef extern from "numpy/arrayobject.h": ctypedef struct NpyIter: @@ -1229,9 +1234,12 @@ cdef extern from "numpy/arrayobject.h": npy_intp* outstrides) except NPY_FAIL npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil # functions for iterating an NpyIter object - NpyIter_IterNextFunc* NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL - NpyIter_GetMultiIndexFunc* NpyIter_GetGetMultiIndex(NpyIter* it, - char** errmsg) except NULL + # + # These don't match the definition in the C API because Cython can't wrap + # function pointers that return functions. + NpyIter_IterNextFunc NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL + NpyIter_GetMultiIndexFunc NpyIter_GetGetMultiIndex(NpyIter* it, + char** errmsg) except NULL char** NpyIter_GetDataPtrArray(NpyIter* it) nogil char** NpyIter_GetInitialDataPtrArray(NpyIter* it) nogil npy_intp* NpyIter_GetIndexPtr(NpyIter* it) diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd index 4aa14530ab4f..6a62a3820042 100644 --- a/numpy/__init__.pxd +++ b/numpy/__init__.pxd @@ -160,6 +160,7 @@ cdef extern from "numpy/arrayobject.h": NPY_COMPLEX512 NPY_INTP + NPY_UINTP NPY_DEFAULT_INT # Not a compile time constant (normally)! 
ctypedef enum NPY_ORDER: @@ -772,6 +773,13 @@ cdef extern from "numpy/ndarraytypes.h": int64_t year int32_t month, day, hour, min, sec, us, ps, as + # Iterator API added in v1.6 + # + # These don't match the definition in the C API because Cython can't wrap + # function pointers that return functions. + # https://github.com/cython/cython/issues/6720 + ctypedef int (*NpyIter_IterNextFunc "NpyIter_IterNextFunc *")(NpyIter* it) noexcept nogil + ctypedef void (*NpyIter_GetMultiIndexFunc "NpyIter_GetMultiIndexFunc *")(NpyIter* it, npy_intp* outcoords) noexcept nogil cdef extern from "numpy/arrayscalars.h": @@ -1023,10 +1031,6 @@ cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: return (obj).obmeta.base -# Iterator API added in v1.6 -ctypedef int (*NpyIter_IterNextFunc)(NpyIter* it) noexcept nogil -ctypedef void (*NpyIter_GetMultiIndexFunc)(NpyIter* it, npy_intp* outcoords) noexcept nogil - cdef extern from "numpy/arrayobject.h": ctypedef struct NpyIter: @@ -1144,6 +1148,9 @@ cdef extern from "numpy/arrayobject.h": npy_intp* outstrides) except NPY_FAIL npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil # functions for iterating an NpyIter object + # + # These don't match the definition in the C API because Cython can't wrap + # function pointers that return functions. NpyIter_IterNextFunc* NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL NpyIter_GetMultiIndexFunc* NpyIter_GetGetMultiIndex(NpyIter* it, char** errmsg) except NULL diff --git a/numpy/__init__.py b/numpy/__init__.py index 13c899384842..2a4fd03b6a44 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -111,7 +111,7 @@ from . 
import _distributor_init try: - from numpy.__config__ import show as show_config + from numpy.__config__ import show_config except ImportError as e: msg = """Error importing numpy: you should not try to import numpy from its source directory; please exit the numpy source tree, and relaunch @@ -151,10 +151,10 @@ left_shift, less, less_equal, lexsort, linspace, little_endian, log, log10, log1p, log2, logaddexp, logaddexp2, logical_and, logical_not, logical_or, logical_xor, logspace, long, longdouble, longlong, matmul, - matrix_transpose, max, maximum, may_share_memory, mean, memmap, min, - min_scalar_type, minimum, mod, modf, moveaxis, multiply, nan, ndarray, - ndim, nditer, negative, nested_iters, newaxis, nextafter, nonzero, - not_equal, number, object_, ones, ones_like, outer, partition, + matvec, matrix_transpose, max, maximum, may_share_memory, mean, memmap, + min, min_scalar_type, minimum, mod, modf, moveaxis, multiply, nan, + ndarray, ndim, nditer, negative, nested_iters, newaxis, nextafter, + nonzero, not_equal, number, object_, ones, ones_like, outer, partition, permute_dims, pi, positive, pow, power, printoptions, prod, promote_types, ptp, put, putmask, rad2deg, radians, ravel, recarray, reciprocal, record, remainder, repeat, require, reshape, resize, @@ -165,8 +165,8 @@ str_, subtract, sum, swapaxes, take, tan, tanh, tensordot, timedelta64, trace, transpose, true_divide, trunc, typecodes, ubyte, ufunc, uint, uint16, uint32, uint64, uint8, uintc, uintp, ulong, - ulonglong, unsignedinteger, unstack, ushort, var, vdot, vecdot, void, - vstack, where, zeros, zeros_like + ulonglong, unsignedinteger, unstack, ushort, var, vdot, vecdot, + vecmat, void, vstack, where, zeros, zeros_like ) # NOTE: It's still under discussion whether these aliases diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 18dbf22e98ad..cbd77a128ab9 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1,10 +1,10 @@ +# ruff: noqa: I001 import builtins import sys import mmap 
import ctypes as ct import array as _array import datetime as dt -import enum from abc import abstractmethod from types import EllipsisType, ModuleType, TracebackType, MappingProxyType, GenericAlias from decimal import Decimal @@ -22,28 +22,30 @@ from numpy._typing import ( NDArray, _SupportsArray, _NestedSequence, - _FiniteNestedSequence, + _ArrayLike, _ArrayLikeBool_co, _ArrayLikeUInt_co, _ArrayLikeInt, _ArrayLikeInt_co, + _ArrayLikeFloat64_co, _ArrayLikeFloat_co, + _ArrayLikeComplex128_co, _ArrayLikeComplex_co, _ArrayLikeNumber_co, + _ArrayLikeObject_co, + _ArrayLikeBytes_co, + _ArrayLikeStr_co, + _ArrayLikeString_co, _ArrayLikeTD64_co, _ArrayLikeDT64_co, - _ArrayLikeObject_co, - # DTypes DTypeLike, _DTypeLike, _DTypeLikeVoid, _VoidDTypeLike, - # Shapes _Shape, _ShapeLike, - # Scalars _CharLike_co, _IntLike_co, @@ -51,7 +53,6 @@ from numpy._typing import ( _TD64Like_co, _NumberLike_co, _ScalarLike_co, - # `number` precision NBitBase, # NOTE: Do not remove the extended precision bit-types even if seemingly unused; @@ -74,7 +75,6 @@ from numpy._typing import ( _NBitSingle, _NBitDouble, _NBitLongDouble, - # Character codes _BoolCodes, _UInt8Codes, @@ -116,7 +116,6 @@ from numpy._typing import ( _VoidCodes, _ObjectCodes, _StringCodes, - _UnsignedIntegerCodes, _SignedIntegerCodes, _IntegerCodes, @@ -127,7 +126,6 @@ from numpy._typing import ( _CharacterCodes, _FlexibleCodes, _GenericCodes, - # Ufuncs _UFunc_Nin1_Nout1, _UFunc_Nin2_Nout1, @@ -203,17 +201,19 @@ else: ) from typing import ( - Literal as L, Any, + ClassVar, + Final, + Generic, + Literal as L, NoReturn, SupportsComplex, SupportsFloat, SupportsInt, SupportsIndex, - Final, - final, - ClassVar, TypeAlias, + TypedDict, + final, type_check_only, ) @@ -222,11 +222,13 @@ from typing import ( # library include `typing_extensions` stubs: # https://github.com/python/typeshed/blob/main/stdlib/typing_extensions.pyi from _typeshed import StrOrBytesPath, SupportsFlush, SupportsLenAndGetItem, SupportsWrite -from 
typing_extensions import CapsuleType, Generic, LiteralString, Never, Protocol, Self, TypeVar, Unpack, deprecated, overload +from typing_extensions import CapsuleType, LiteralString, Never, Protocol, Self, TypeVar, Unpack, deprecated, overload from numpy import ( + char, core, ctypeslib, + dtypes, exceptions, f2py, fft, @@ -235,14 +237,21 @@ from numpy import ( ma, polynomial, random, - testing, - typing, - version, - dtypes, rec, - char, strings, + testing, + typing, +) + +# available through `__getattr__`, but not in `__all__` or `__dir__` +from numpy import ( + __config__ as __config__, + matlib as matlib, + matrixlib as matrixlib, + version as version, ) +if sys.version_info < (3, 12): + from numpy import distutils as distutils from numpy._core.records import ( record, @@ -427,6 +436,8 @@ from numpy._core.shape_base import ( unstack, ) +from ._expired_attrs_2_0 import __expired_attributes__ as __expired_attributes__ + from numpy.lib import ( scimath as emath, ) @@ -437,6 +448,7 @@ from numpy.lib._arraypad_impl import ( from numpy.lib._arraysetops_impl import ( ediff1d, + in1d, intersect1d, isin, setdiff1d, @@ -478,6 +490,8 @@ from numpy.lib._function_base_impl import ( bartlett, blackman, kaiser, + trapezoid, + trapz, i0, meshgrid, delete, @@ -485,9 +499,10 @@ from numpy.lib._function_base_impl import ( append, interp, quantile, - trapezoid, ) +from numpy._globals import _CopyMode + from numpy.lib._histograms_impl import ( histogram_bin_edges, histogram, @@ -495,6 +510,8 @@ from numpy.lib._histograms_impl import ( ) from numpy.lib._index_tricks_impl import ( + ndenumerate, + ndindex, ravel_multi_index, unravel_index, mgrid, @@ -554,6 +571,7 @@ from numpy.lib._polynomial_impl import ( from numpy.lib._shape_base_impl import ( column_stack, + row_stack, dstack, array_split, split, @@ -624,13 +642,10 @@ from numpy.matrixlib import ( bmat, ) -__all__ = [ - "emath", "show_config", "version", "__version__", "__array_namespace_info__", - +__all__ = [ # noqa: RUF022 # 
__numpy_submodules__ - "linalg", "fft", "dtypes", "random", "polynomial", "ma", "exceptions", "lib", - "ctypeslib", "testing", "test", "rec", "char", "strings", - "core", "typing", "f2py", + "char", "core", "ctypeslib", "dtypes", "exceptions", "f2py", "fft", "lib", "linalg", + "ma", "polynomial", "random", "rec", "strings", "test", "testing", "typing", # _core.__all__ "abs", "acos", "acosh", "asin", "asinh", "atan", "atanh", "atan2", "bitwise_invert", @@ -648,8 +663,8 @@ __all__ = [ "tensordot", "little_endian", "fromiter", "array_equal", "array_equiv", "indices", "fromfunction", "isclose", "isscalar", "binary_repr", "base_repr", "ones", "identity", "allclose", "putmask", "flatnonzero", "inf", "nan", "False_", "True_", - "bitwise_not", "full", "full_like", "matmul", "vecdot", "shares_memory", - "may_share_memory", "_get_promotion_state", "_set_promotion_state", + "bitwise_not", "full", "full_like", "matmul", "vecdot", "vecmat", + "shares_memory", "may_share_memory", "all", "amax", "amin", "any", "argmax", "argmin", "argpartition", "argsort", "around", "choose", "clip", "compress", "cumprod", "cumsum", "cumulative_prod", "cumulative_sum", "diagonal", "mean", "max", "min", "matrix_transpose", "ndim", @@ -664,7 +679,7 @@ __all__ = [ "frompyfunc", "gcd", "greater", "greater_equal", "heaviside", "hypot", "invert", "isfinite", "isinf", "isnan", "isnat", "lcm", "ldexp", "left_shift", "less", "less_equal", "log", "log10", "log1p", "log2", "logaddexp", "logaddexp2", - "logical_and", "logical_not", "logical_or", "logical_xor", "maximum", "minimum", + "logical_and", "logical_not", "logical_or", "logical_xor", "matvec", "maximum", "minimum", "mod", "modf", "multiply", "negative", "nextafter", "not_equal", "pi", "positive", "power", "rad2deg", "radians", "reciprocal", "remainder", "right_shift", "rint", "sign", "signbit", "sin", "sinh", "spacing", "sqrt", "square", "subtract", "tan", @@ -683,7 +698,7 @@ __all__ = [ "array2string", "array_str", "array_repr", "set_printoptions", 
"get_printoptions", "printoptions", "format_float_positional", "format_float_scientific", "require", "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall", - "errstate", "_no_nep50_warning", + "errstate", # _core.function_base.__all__ "logspace", "linspace", "geomspace", # _core.getlimits.__all__ @@ -693,7 +708,8 @@ __all__ = [ "vstack", # _core.einsumfunc.__all__ "einsum", "einsum_path", - + # matrixlib.__all__ + "matrix", "bmat", "asmatrix", # lib._histograms_impl.__all__ "histogram", "histogramdd", "histogram_bin_edges", # lib._nanfunctions_impl.__all__ @@ -701,29 +717,26 @@ __all__ = [ "nanpercentile", "nanvar", "nanstd", "nanprod", "nancumsum", "nancumprod", "nanquantile", # lib._function_base_impl.__all__ - # NOTE: `trapz` is omitted because it is deprecated "select", "piecewise", "trim_zeros", "copy", "iterable", "percentile", "diff", "gradient", "angle", "unwrap", "sort_complex", "flip", "rot90", "extract", "place", "vectorize", "asarray_chkfinite", "average", "bincount", "digitize", "cov", "corrcoef", "median", "sinc", "hamming", "hanning", "bartlett", "blackman", - "kaiser", "i0", "meshgrid", "delete", "insert", "append", "interp", "quantile", - "trapezoid", + "kaiser", "trapezoid", "trapz", "i0", "meshgrid", "delete", "insert", "append", + "interp", "quantile", # lib._twodim_base_impl.__all__ "diag", "diagflat", "eye", "fliplr", "flipud", "tri", "triu", "tril", "vander", "histogram2d", "mask_indices", "tril_indices", "tril_indices_from", "triu_indices", "triu_indices_from", # lib._shape_base_impl.__all__ - # NOTE: `row_stack` is omitted because it is deprecated "column_stack", "dstack", "array_split", "split", "hsplit", "vsplit", "dsplit", "apply_over_axes", "expand_dims", "apply_along_axis", "kron", "tile", - "take_along_axis", "put_along_axis", + "take_along_axis", "put_along_axis", "row_stack", # lib._type_check_impl.__all__ "iscomplexobj", "isrealobj", "imag", "iscomplex", "isreal", "nan_to_num", "real", "real_if_close", 
"typename", "mintypecode", "common_type", # lib._arraysetops_impl.__all__ - # NOTE: `in1d` is omitted because it is deprecated - "ediff1d", "intersect1d", "isin", "setdiff1d", "setxor1d", "union1d", "unique", - "unique_all", "unique_counts", "unique_inverse", "unique_values", + "ediff1d", "in1d", "intersect1d", "isin", "setdiff1d", "setxor1d", "union1d", + "unique", "unique_all", "unique_counts", "unique_inverse", "unique_values", # lib._ufunclike_impl.__all__ "fix", "isneginf", "isposinf", # lib._arraypad_impl.__all__ @@ -743,9 +756,9 @@ __all__ = [ "index_exp", "ix_", "ndenumerate", "ndindex", "fill_diagonal", "diag_indices", "diag_indices_from", - # matrixlib.__all__ - "matrix", "bmat", "asmatrix", -] + # __init__.__all__ + "emath", "show_config", "__version__", "__array_namespace_info__", +] # fmt: skip ### Constrained types (for internal use only) # Only use these for functions; never as generic type parameter. @@ -800,6 +813,7 @@ _1NShapeT = TypeVar("_1NShapeT", bound=tuple[L[1], Unpack[tuple[L[1], ...]]]) # _SCT = TypeVar("_SCT", bound=generic) _SCT_co = TypeVar("_SCT_co", bound=generic, covariant=True) _NumberT = TypeVar("_NumberT", bound=number[Any]) +_RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) _FloatingT_co = TypeVar("_FloatingT_co", bound=floating[Any], default=floating[Any], covariant=True) _IntegerT = TypeVar("_IntegerT", bound=integer) _IntegerT_co = TypeVar("_IntegerT_co", bound=integer[Any], default=integer[Any], covariant=True) @@ -833,18 +847,21 @@ _1D: TypeAlias = tuple[int] _2D: TypeAlias = tuple[int, int] _2Tuple: TypeAlias = tuple[_T, _T] -_ArrayUInt_co: TypeAlias = NDArray[np.bool | unsignedinteger[Any]] -_ArrayInt_co: TypeAlias = NDArray[np.bool | integer[Any]] -_ArrayFloat_co: TypeAlias = NDArray[np.bool | integer[Any] | floating[Any]] -_ArrayComplex_co: TypeAlias = NDArray[np.bool | integer[Any] | floating[Any] | complexfloating[Any, Any]] -_ArrayNumber_co: TypeAlias = NDArray[np.bool | number[Any]] -_ArrayTD64_co: 
TypeAlias = NDArray[np.bool | integer[Any] | timedelta64] +_ArrayUInt_co: TypeAlias = NDArray[unsignedinteger | np.bool] +_ArrayInt_co: TypeAlias = NDArray[integer | np.bool] +_ArrayFloat64_co: TypeAlias = NDArray[floating[_64Bit] | float32 | float16 | integer | np.bool] +_ArrayFloat_co: TypeAlias = NDArray[floating | integer | np.bool] +_ArrayComplex128_co: TypeAlias = NDArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool] +_ArrayComplex_co: TypeAlias = NDArray[inexact | integer | np.bool] +_ArrayNumber_co: TypeAlias = NDArray[number | np.bool] +_ArrayTD64_co: TypeAlias = NDArray[timedelta64 | integer | np.bool] -_Float64_co: TypeAlias = float | floating[_64Bit] | float32 | float16 | integer[Any] | np.bool +_Float64_co: TypeAlias = float | floating[_64Bit] | float32 | float16 | integer | np.bool _Complex64_co: TypeAlias = number[_32Bit] | number[_16Bit] | number[_8Bit] | builtins.bool | np.bool _Complex128_co: TypeAlias = complex | number[_64Bit] | _Complex64_co -_ArrayIndexLike: TypeAlias = SupportsIndex | slice | EllipsisType | _ArrayLikeInt_co | None +_ToIndex: TypeAlias = SupportsIndex | slice | EllipsisType | _ArrayLikeInt_co | None +_ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...] 
_UnsignedIntegerCType: TypeAlias = type[ ct.c_uint8 | ct.c_uint16 | ct.c_uint32 | ct.c_uint64 @@ -982,6 +999,8 @@ if sys.version_info >= (3, 11): _ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co else: _ConvertibleToComplex: TypeAlias = complex | SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co +_ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None +_ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None _NDIterFlagsKind: TypeAlias = L[ "buffered", @@ -1038,6 +1057,16 @@ _IntTD64Unit: TypeAlias = L[_MonthUnit, _IntTimeUnit] _TD64Unit: TypeAlias = L[_DateUnit, _TimeUnit] _TimeUnitSpec: TypeAlias = _TD64UnitT | tuple[_TD64UnitT, SupportsIndex] +### TypedDict's (for internal use only) + +@type_check_only +class _FormerAttrsDict(TypedDict): + object: LiteralString + float: LiteralString + complex: LiteralString + str: LiteralString + int: LiteralString + ### Protocols (for internal use only) @type_check_only @@ -1060,29 +1089,9 @@ class _SupportsDLPack(Protocol[_T_contra]): def __dlpack__(self, /, *, stream: _T_contra | None = None) -> CapsuleType: ... @type_check_only -class _HasShape(Protocol[_ShapeT_co]): - @property - def shape(self, /) -> _ShapeT_co: ... - -@type_check_only -class _HasShapeAndSupportsItem(_HasShape[_ShapeT_co], _SupportsItem[_T_co], Protocol[_ShapeT_co, _T_co]): - pass - -# matches any `x` on `x.type.item() -> _T_co`, e.g. `dtype[np.int8]` gives `_T_co: int` -@type_check_only -class _HashTypeWithItem(Protocol[_T_co]): +class _HasDType(Protocol[_T_co]): @property - def type(self, /) -> type[_SupportsItem[_T_co]]: ... - -# matches any `x` on `x.shape: _ShapeT_co` and `x.dtype.type.item() -> _T_co`, -# useful for capturing the item-type (`_T_co`) of the scalar-type of an array with -# specific shape (`_ShapeT_co`). 
-@type_check_only -class _HasShapeAndDTypeWithItem(Protocol[_ShapeT_co, _T_co]): - @property - def shape(self, /) -> _ShapeT_co: ... - @property - def dtype(self, /) -> _HashTypeWithItem[_T_co]: ... + def dtype(self, /) -> _T_co: ... @type_check_only class _HasRealAndImag(Protocol[_RealT_co, _ImagT_co]): @@ -1112,6 +1121,7 @@ class _HasDateAttributes(Protocol): @property def year(self) -> int: ... + ### Mixins (for internal use only) @type_check_only @@ -1140,24 +1150,40 @@ class _IntegralMixin(_RealMixin): ### Public API __version__: Final[LiteralString] = ... -__array_api_version__: Final = "2023.12" -test: Final[PytestTester] = ... e: Final[float] = ... euler_gamma: Final[float] = ... +pi: Final[float] = ... inf: Final[float] = ... nan: Final[float] = ... -pi: Final[float] = ... - little_endian: Final[builtins.bool] = ... - False_: Final[np.bool[L[False]]] = ... True_: Final[np.bool[L[True]]] = ... - newaxis: Final[None] = None +# not in __all__ +__NUMPY_SETUP__: Final[L[False]] = False +__numpy_submodules__: Final[set[LiteralString]] = ... +__former_attrs__: Final[_FormerAttrsDict] = ... +__future_scalars__: Final[set[L["bytes", "str", "object"]]] = ... +__array_api_version__: Final[L["2023.12"]] = "2023.12" +test: Final[PytestTester] = ... + +@type_check_only +class _DTypeMeta(type): + @property + def type(cls, /) -> type[generic] | None: ... + @property + def _abstract(cls, /) -> bool: ... + @property + def _is_numeric(cls, /) -> bool: ... + @property + def _parametric(cls, /) -> bool: ... + @property + def _legacy(cls, /) -> bool: ... + @final -class dtype(Generic[_SCT_co]): +class dtype(Generic[_SCT_co], metaclass=_DTypeMeta): names: None | tuple[builtins.str, ...] def __hash__(self) -> int: ... @@ -2006,7 +2032,6 @@ class _ArrayOrScalarCommon: correction: float = ..., ) -> _ArrayT: ... 
- class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] @property @@ -2082,16 +2107,58 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ... @overload - def __getitem__(self, key: _ArrayIndexLike | tuple[_ArrayIndexLike, ...], /) -> ndarray[_Shape, _DType_co]: ... + def __getitem__(self, key: _ToIndices, /) -> ndarray[_Shape, _DType_co]: ... @overload def __getitem__(self: NDArray[void], key: str, /) -> ndarray[_ShapeT_co, np.dtype[Any]]: ... @overload def __getitem__(self: NDArray[void], key: list[str], /) -> ndarray[_ShapeT_co, _dtype[void]]: ... - @overload - def __setitem__(self: NDArray[void], key: str | list[str], value: ArrayLike, /) -> None: ... - @overload - def __setitem__(self, key: _ArrayIndexLike | tuple[_ArrayIndexLike, ...], value: ArrayLike, /) -> None: ... + @overload # flexible | object_ | bool + def __setitem__( + self: ndarray[Any, dtype[flexible | object_ | np.bool] | dtypes.StringDType], + key: _ToIndices, + value: object, + /, + ) -> None: ... + @overload # integer + def __setitem__( + self: NDArray[integer], + key: _ToIndices, + value: _ConvertibleToInt | _NestedSequence[_ConvertibleToInt] | _ArrayLikeInt_co, + /, + ) -> None: ... + @overload # floating + def __setitem__( + self: NDArray[floating], + key: _ToIndices, + value: _ConvertibleToFloat | _NestedSequence[_ConvertibleToFloat | None] | _ArrayLikeFloat_co | None, + /, + ) -> None: ... + @overload # complexfloating + def __setitem__( + self: NDArray[complexfloating], + key: _ToIndices, + value: _ConvertibleToComplex | _NestedSequence[_ConvertibleToComplex | None] | _ArrayLikeNumber_co | None, + /, + ) -> None: ... 
+ @overload # timedelta64 + def __setitem__( + self: NDArray[timedelta64], + key: _ToIndices, + value: _ConvertibleToTD64 | _NestedSequence[_ConvertibleToTD64], + /, + ) -> None: ... + @overload # datetime64 + def __setitem__( + self: NDArray[datetime64], + key: _ToIndices, + value: _ConvertibleToDT64 | _NestedSequence[_ConvertibleToDT64], + /, + ) -> None: ... + @overload # void + def __setitem__(self: NDArray[void], key: str | list[str], value: object, /) -> None: ... + @overload # catch-all + def __setitem__(self, key: _ToIndices, value: ArrayLike, /) -> None: ... @property def ctypes(self) -> _ctypes[int]: ... @@ -2108,38 +2175,33 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @property def flat(self) -> flatiter[Self]: ... - @overload # special casing for `StringDType`, which has no scalar type - def item(self: ndarray[Any, dtypes.StringDType], /) -> str: ... - @overload - def item(self: ndarray[Any, dtypes.StringDType], arg0: SupportsIndex | tuple[SupportsIndex, ...] = ..., /) -> str: ... - @overload - def item(self: ndarray[Any, dtypes.StringDType], /, *args: SupportsIndex) -> str: ... @overload # use the same output type as that of the underlying `generic` - def item(self: _HasShapeAndDTypeWithItem[Any, _T], /) -> _T: ... - @overload - def item(self: _HasShapeAndDTypeWithItem[Any, _T], arg0: SupportsIndex | tuple[SupportsIndex, ...] = ..., /) -> _T: ... - @overload - def item(self: _HasShapeAndDTypeWithItem[Any, _T], /, *args: SupportsIndex) -> _T: ... + def item(self: NDArray[generic[_T]], i0: SupportsIndex | tuple[SupportsIndex, ...] = ..., /, *args: SupportsIndex) -> _T: ... + @overload # special casing for `StringDType`, which has no scalar type + def item( + self: ndarray[Any, dtypes.StringDType], + arg0: SupportsIndex | tuple[SupportsIndex, ...] = ..., + /, + *args: SupportsIndex, + ) -> str: ... @overload - def tolist(self: _HasShapeAndSupportsItem[tuple[()], _T], /) -> _T: ... 
+ def tolist(self: ndarray[tuple[()], dtype[generic[_T]]], /) -> _T: ... @overload - def tolist(self: _HasShapeAndSupportsItem[tuple[int], _T], /) -> list[_T]: ... + def tolist(self: ndarray[tuple[int], dtype[generic[_T]]], /) -> list[_T]: ... @overload - def tolist(self: _HasShapeAndSupportsItem[tuple[int, int], _T], /) -> list[list[_T]]: ... + def tolist(self: ndarray[tuple[int, int], dtype[generic[_T]]], /) -> list[list[_T]]: ... @overload - def tolist(self: _HasShapeAndSupportsItem[tuple[int, int, int], _T], /) -> list[list[list[_T]]]: ... + def tolist(self: ndarray[tuple[int, int, int], dtype[generic[_T]]], /) -> list[list[list[_T]]]: ... @overload - def tolist(self: _HasShapeAndSupportsItem[Any, _T], /) -> _T | list[_T] | list[list[_T]] | list[list[list[Any]]]: ... + def tolist(self, /) -> Any: ... @overload def resize(self, new_shape: _ShapeLike, /, *, refcheck: builtins.bool = ...) -> None: ... @overload - def resize(self, *new_shape: SupportsIndex, refcheck: builtins.bool = ...) -> None: ... + def resize(self, /, *new_shape: SupportsIndex, refcheck: builtins.bool = ...) -> None: ... - def setflags( - self, write: builtins.bool = ..., align: builtins.bool = ..., uic: builtins.bool = ... - ) -> None: ... + def setflags(self, write: builtins.bool = ..., align: builtins.bool = ..., uic: builtins.bool = ...) -> None: ... def squeeze( self, @@ -2231,14 +2293,47 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): where: _ArrayLikeBool_co = True, ) -> _ArrayT: ... + # + @overload + def partition( + self, + /, + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> None: ... + @overload + def partition( + self: NDArray[void], + /, + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> None: ... 
+ + # + @overload def argpartition( self, - kth: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - kind: _PartitionKind = ..., - order: None | str | Sequence[str] = ..., + /, + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> NDArray[intp]: ... + @overload + def argpartition( + self: NDArray[void], + /, + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, ) -> NDArray[intp]: ... + # def diagonal( self, offset: SupportsIndex = ..., @@ -2258,22 +2353,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): # `nonzero()` is deprecated for 0d arrays/generics def nonzero(self) -> tuple[NDArray[intp], ...]: ... - def partition( - self, - kth: _ArrayLikeInt_co, - axis: SupportsIndex = ..., - kind: _PartitionKind = ..., - order: None | str | Sequence[str] = ..., - ) -> None: ... - # `put` is technically available to `generic`, # but is pointless as `generic`s are immutable - def put( - self, - ind: _ArrayLikeInt_co, - v: ArrayLike, - mode: _ModeKind = ..., - ) -> None: ... + def put(self, /, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... @overload def searchsorted( # type: ignore[misc] @@ -2290,13 +2372,6 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): sorter: None | _ArrayLikeInt_co = ..., ) -> NDArray[intp]: ... - def setfield( - self, - val: ArrayLike, - dtype: DTypeLike, - offset: SupportsIndex = ..., - ) -> None: ... - def sort( self, axis: SupportsIndex = ..., @@ -2449,7 +2524,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): casting: _CastingKind = ..., subok: builtins.bool = ..., copy: builtins.bool | _CopyMode = ..., - ) -> NDArray[_SCT]: ... + ) -> ndarray[_ShapeT_co, dtype[_SCT]]: ... 
@overload def astype( self, @@ -2458,40 +2533,32 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): casting: _CastingKind = ..., subok: builtins.bool = ..., copy: builtins.bool | _CopyMode = ..., - ) -> NDArray[Any]: ... + ) -> ndarray[_ShapeT_co, dtype[Any]]: ... - @overload - def view(self) -> Self: ... - @overload - def view(self, type: type[_ArrayT]) -> _ArrayT: ... - @overload - def view(self, dtype: _DTypeLike[_SCT]) -> NDArray[_SCT]: ... - @overload - def view(self, dtype: DTypeLike) -> NDArray[Any]: ... - @overload - def view( - self, - dtype: DTypeLike, - type: type[_ArrayT], - ) -> _ArrayT: ... - - @overload - def getfield( - self, - dtype: _DTypeLike[_SCT], - offset: SupportsIndex = ... - ) -> NDArray[_SCT]: ... - @overload - def getfield( - self, - dtype: DTypeLike, - offset: SupportsIndex = ... - ) -> NDArray[Any]: ... - - def __index__(self: NDArray[np.integer[Any]], /) -> int: ... - def __int__(self: NDArray[number[Any] | np.timedelta64 | np.bool | object_], /) -> int: ... - def __float__(self: NDArray[number[Any] | np.timedelta64 | np.bool | object_], /) -> float: ... - def __complex__(self: NDArray[number[Any] | np.bool | object_], /) -> complex: ... + # + @overload # () + def view(self, /) -> Self: ... + @overload # (dtype: T) + def view(self, /, dtype: _DType | _HasDType[_DType]) -> ndarray[_ShapeT_co, _DType]: ... + @overload # (dtype: dtype[T]) + def view(self, /, dtype: _DTypeLike[_SCT]) -> NDArray[_SCT]: ... + @overload # (type: T) + def view(self, /, *, type: type[_ArrayT]) -> _ArrayT: ... + @overload # (_: T) + def view(self, /, dtype: type[_ArrayT]) -> _ArrayT: ... + @overload # (dtype: ?) + def view(self, /, dtype: DTypeLike) -> ndarray[_ShapeT_co, dtype[Any]]: ... + @overload # (dtype: ?, type: type[T]) + def view(self, /, dtype: DTypeLike, type: type[_ArrayT]) -> _ArrayT: ... + + def setfield(self, /, val: ArrayLike, dtype: DTypeLike, offset: SupportsIndex = 0) -> None: ... 
+ @overload + def getfield(self, dtype: _DTypeLike[_SCT], offset: SupportsIndex = 0) -> NDArray[_SCT]: ... + @overload + def getfield(self, dtype: DTypeLike, offset: SupportsIndex = 0) -> NDArray[Any]: ... + + def __index__(self: NDArray[integer], /) -> int: ... + def __complex__(self: NDArray[number | np.bool | object_], /) -> complex: ... def __len__(self) -> int: ... def __contains__(self, value: object, /) -> builtins.bool: ... @@ -2505,12 +2572,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload # ?-d def __iter__(self, /) -> Iterator[Any]: ... - # The last overload is for catching recursive objects whose - # nesting is too deep. - # The first overload is for catching `bytes` (as they are a subtype of - # `Sequence[int]`) and `str`. As `str` is a recursive sequence of - # strings, it will pass through the final overload otherwise - + # @overload def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... @overload @@ -2518,10 +2580,17 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... @overload - def __lt__(self: NDArray[object_], other: Any, /) -> NDArray[np.bool]: ... + def __lt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + @overload + def __lt__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / + ) -> NDArray[np.bool]: ... + @overload + def __lt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... @overload - def __lt__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + def __lt__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + # @overload def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... 
@overload @@ -2529,10 +2598,17 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... @overload - def __le__(self: NDArray[object_], other: Any, /) -> NDArray[np.bool]: ... + def __le__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + @overload + def __le__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / + ) -> NDArray[np.bool]: ... @overload - def __le__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + def __le__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + @overload + def __le__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + # @overload def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... @overload @@ -2540,10 +2616,17 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... @overload - def __gt__(self: NDArray[object_], other: Any, /) -> NDArray[np.bool]: ... + def __gt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... @overload - def __gt__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + def __gt__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / + ) -> NDArray[np.bool]: ... + @overload + def __gt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + @overload + def __gt__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + # @overload def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... 
@overload @@ -2551,9 +2634,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... @overload - def __ge__(self: NDArray[object_], other: Any, /) -> NDArray[np.bool]: ... + def __ge__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... @overload - def __ge__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + def __ge__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / + ) -> NDArray[np.bool]: ... + @overload + def __ge__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + @overload + def __ge__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... # Unary ops @@ -2567,116 +2656,195 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): # @overload # def __abs__(self: ndarray[_ShapeType, dtype[complex128]], /) -> ndarray[_ShapeType, dtype[float64]]: ... @overload - def __abs__( - self: ndarray[_ShapeT, dtype[complexfloating[_AnyNBitInexact]]], / - ) -> ndarray[_ShapeT, dtype[floating[_AnyNBitInexact]]]: ... + def __abs__(self: ndarray[_ShapeT, dtype[complexfloating[_NBit]]], /) -> ndarray[_ShapeT, dtype[floating[_NBit]]]: ... @overload def __abs__(self: _RealArrayT, /) -> _RealArrayT: ... + def __invert__(self: _IntegralArrayT, /) -> _IntegralArrayT: ... # noqa: PYI019 def __neg__(self: _NumericArrayT, /) -> _NumericArrayT: ... # noqa: PYI019 def __pos__(self: _NumericArrayT, /) -> _NumericArrayT: ... # noqa: PYI019 # Binary ops + + # TODO: Support the "1d @ 1d -> scalar" case + @overload + def __matmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + @overload + def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... 
# type: ignore[overload-overlap] + @overload + def __matmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __matmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __matmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __matmul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __matmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... + def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __matmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... 
+ @overload + def __matmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload def __matmul__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + @overload # signature equivalent to __matmul__ + def __rmatmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + @overload + def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __rmatmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmatmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __rmatmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __rmatmul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rmatmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... 
 # type: ignore[overload-overlap] + @overload + def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] @overload def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... @overload - def __rmatmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __rmatmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload def __rmatmul__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __mod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + @overload + def __mod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __mod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __mod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __mod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... 
# type: ignore[misc] + def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __mod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[timedelta64]: ... + def __mod__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... @overload def __mod__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __mod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + @overload # signature equivalent to __mod__ + def __rmod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... @overload - def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __rmod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] @overload - def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] @overload - def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rmod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] @overload - def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rmod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... 
@overload - def __rmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[timedelta64]: ... + def __rmod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... + @overload + def __rmod__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... @overload def __rmod__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __rmod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __divmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc] + def __divmod__(self: NDArray[_RealNumberT], rhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ... + @overload + def __divmod__(self: NDArray[_RealNumberT], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + @overload + def __divmod__(self: NDArray[np.bool], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... # type: ignore[overload-overlap] + @overload + def __divmod__(self: NDArray[np.bool], rhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] @overload - def __divmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc] + def __divmod__(self: NDArray[float64], rhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... 
@overload - def __divmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger[Any]]]: ... # type: ignore[misc] + def __divmod__(self: _ArrayFloat64_co, rhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ... @overload - def __divmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc] + def __divmod__(self: _ArrayUInt_co, rhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... # type: ignore[overload-overlap] @overload - def __divmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + def __divmod__(self: _ArrayInt_co, rhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... # type: ignore[overload-overlap] + @overload + def __divmod__(self: _ArrayFloat_co, rhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ... + @overload + def __divmod__(self: NDArray[timedelta64], rhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + @overload # signature equivalent to __divmod__ + def __rdivmod__(self: NDArray[_RealNumberT], lhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ... + @overload + def __rdivmod__(self: NDArray[_RealNumberT], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + @overload + def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... # type: ignore[overload-overlap] @overload - def __rdivmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc] + def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... 
# type: ignore[overload-overlap] @overload - def __rdivmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc] + def __rdivmod__(self: NDArray[float64], lhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... @overload - def __rdivmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger[Any]]]: ... # type: ignore[misc] + def __rdivmod__(self: _ArrayFloat64_co, lhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ... @overload - def __rdivmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc] + def __rdivmod__(self: _ArrayUInt_co, lhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... # type: ignore[overload-overlap] @overload - def __rdivmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + def __rdivmod__(self: _ArrayInt_co, lhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... # type: ignore[overload-overlap] + @overload + def __rdivmod__(self: _ArrayFloat_co, lhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ... + @overload + def __rdivmod__(self: NDArray[timedelta64], lhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... @overload - def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __add__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __add__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... 
# type: ignore[misc] + def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] @overload - def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __add__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __add__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __add__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __add__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __add__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... # type: ignore[misc] + def __add__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... 
# type: ignore[overload-overlap] + @overload + def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload def __add__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... @overload @@ -2686,20 +2854,34 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __add__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + @overload # signature equivalent to __add__ + def __radd__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __radd__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __radd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __radd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] @overload - def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __radd__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __radd__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __radd__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __radd__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... 
@overload - def __radd__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __radd__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... # type: ignore[misc] + def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload def __radd__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... @overload @@ -2709,20 +2891,34 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + @overload + def __sub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __sub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload def __sub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... 
# type: ignore[misc] + def __sub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __sub__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __sub__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __sub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __sub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] @overload - def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... # type: ignore[misc] + def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... 
@overload def __sub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... @overload @@ -2732,22 +2928,36 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + @overload + def __rsub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rsub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload def __rsub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rsub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __rsub__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __rsub__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rsub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... 
# type: ignore[overload-overlap] @overload - def __rsub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] @overload - def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... # type: ignore[misc] + def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] @overload - def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... # type: ignore[misc] + def __rsub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + @overload + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... @overload def __rsub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[timedelta64]: ... @overload @@ -2756,156 +2966,252 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __mul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __mul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... 
# type: ignore[overload-overlap] @overload - def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __mul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __mul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __mul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __mul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __mul__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __mul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + def __mul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __mul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... 
+ @overload + def __mul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + @overload + def __mul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... @overload def __mul__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + @overload # signature equivalent to __mul__ + def __rmul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] @overload - def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rmul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __rmul__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... 
@overload - def __rmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __rmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __rmul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __rmul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + @overload + def __rmul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... @overload def __rmul__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __truediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __truediv__(self: _ArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... 
@overload - def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __truediv__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __truediv__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __floordiv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[int64]: ... + def __truediv__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + def __truediv__(self: _ArrayFloat_co, other: _ArrayLike[floating], /) -> NDArray[floating]: ... @overload - def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + def __truediv__(self: NDArray[complexfloating], other: _ArrayLikeNumber_co, /) -> NDArray[complexfloating]: ... @overload - def __floordiv__(self: NDArray[object_], other: Any, /) -> Any: ... + def __truediv__(self: _ArrayNumber_co, other: _ArrayLike[complexfloating], /) -> NDArray[complexfloating]: ... @overload - def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __truediv__(self: NDArray[inexact], other: _ArrayLikeNumber_co, /) -> NDArray[inexact]: ... + @overload + def __truediv__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __truediv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[float64]: ... + @overload + def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... 
+ @overload + def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + @overload + def __truediv__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __rtruediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rtruediv__(self: _ArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rtruediv__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rtruediv__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __rfloordiv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[int64]: ... + def __rtruediv__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeTD64_co, /) -> NoReturn: ... + def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLike[floating], /) -> NDArray[floating]: ... @overload - def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... 
+ def __rtruediv__(self: NDArray[complexfloating], other: _ArrayLikeNumber_co, /) -> NDArray[complexfloating]: ... @overload - def __rfloordiv__(self: NDArray[object_], other: Any, /) -> Any: ... + def __rtruediv__(self: _ArrayNumber_co, other: _ArrayLike[complexfloating], /) -> NDArray[complexfloating]: ... @overload - def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __rtruediv__(self: NDArray[inexact], other: _ArrayLikeNumber_co, /) -> NDArray[inexact]: ... + @overload + def __rtruediv__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __rtruediv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[float64]: ... + @overload + def __rtruediv__(self: NDArray[integer | floating], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... + @overload + def __rtruediv__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __floordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... @overload - def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __floordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... 
# type: ignore[misc] + def __floordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... + def __floordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __pow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __floordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __pow__(self: NDArray[object_], other: Any, /) -> Any: ... + def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... + @overload + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[int64]: ... + @overload + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + @overload + def __floordiv__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __rfloordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... 
@overload - def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rfloordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... + def __rfloordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __rpow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __rfloordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rpow__(self: NDArray[object_], other: Any, /) -> Any: ... + def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[int64]: ... 
+ @overload + def __rfloordiv__(self: NDArray[floating | integer], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... + @overload + def __rfloordiv__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __truediv__(self: _ArrayInt_co, other: _ArrayInt_co, /) -> NDArray[float64]: ... # type: ignore[misc] + def __pow__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __truediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __pow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __truediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] @overload - def __truediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __pow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __truediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[float64]: ... + def __pow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + def __pow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... 
+ def __pow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __truediv__(self: NDArray[object_], other: Any, /) -> Any: ... + def __pow__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... + @overload + def __pow__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __pow__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rtruediv__(self: _ArrayInt_co, other: _ArrayInt_co, /) -> NDArray[float64]: ... # type: ignore[misc] + def __rpow__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rpow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __rtruediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... 
# type: ignore[overload-overlap] @overload - def __rtruediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __rpow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __rtruediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[float64]: ... + def __rpow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __rtruediv__(self: NDArray[np.bool], other: _ArrayLikeTD64_co, /) -> NoReturn: ... + def __rpow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + def __rpow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __rtruediv__(self: NDArray[object_], other: Any, /) -> Any: ... + def __rpow__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... + @overload + def __rpow__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __rpow__(self: NDArray[object_], other: Any, /) -> Any: ... 
+ @overload + def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] @@ -3050,6 +3356,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __iadd__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + # @overload def __isub__( self: NDArray[unsignedinteger[Any]], @@ -3073,6 +3380,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __isub__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + # @overload def __imul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload @@ -3097,66 +3405,51 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): def __imul__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __itruediv__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __itruediv__(self: NDArray[floating[Any]], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __itruediv__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __itruediv__( - self: NDArray[complexfloating[Any]], - other: _ArrayLikeComplex_co, + def __ipow__( + self: NDArray[unsignedinteger[Any]], + other: _ArrayLikeUInt_co | _IntLike_co, /, ) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeInt, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ipow__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __itruediv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... 
- + def __ipow__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ifloordiv__( - self: NDArray[unsignedinteger[Any]], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ipow__(self: NDArray[floating[Any]], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ifloordiv__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ipow__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ifloordiv__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ipow__(self: NDArray[complexfloating[Any]], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ifloordiv__(self: NDArray[floating[Any]], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ipow__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + + # @overload - def __ifloordiv__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __itruediv__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __ifloordiv__( - self: NDArray[complexfloating[Any]], - other: _ArrayLikeComplex_co, - /, - ) -> ndarray[_ShapeT_co, _DType_co]: ... + def __itruediv__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeInt, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeInt, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __ifloordiv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... 
+ def __itruediv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + # keep in sync with `__imod__` @overload - def __ipow__( - self: NDArray[unsignedinteger[Any]], + def __ifloordiv__( + self: NDArray[unsignedinteger], other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DType_co]: ... + / + ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __ipow__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ifloordiv__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __ipow__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __ipow__(self: NDArray[floating[Any]], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ifloordiv__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __ipow__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeInt, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __ipow__(self: NDArray[complexfloating[Any]], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __ipow__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ifloordiv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + # keep in sync with `__ifloordiv__` @overload def __imod__( self: NDArray[unsignedinteger[Any]], @@ -3178,6 +3471,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __imod__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... 
+ # keep in sync with `__irshift__` @overload def __ilshift__( self: NDArray[unsignedinteger[Any]], @@ -3189,6 +3483,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __ilshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + # keep in sync with `__ilshift__` @overload def __irshift__( self: NDArray[unsignedinteger[Any]], @@ -3200,6 +3495,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __irshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + # keep in sync with `__ixor__` and `__ior__` @overload def __iand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload @@ -3213,6 +3509,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __iand__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + # keep in sync with `__iand__` and `__ior__` @overload def __ixor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload @@ -3226,6 +3523,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __ixor__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + # keep in sync with `__iand__` and `__ixor__` @overload def __ior__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload @@ -3239,6 +3537,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __ior__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + # @overload def __imatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... 
@overload @@ -3256,6 +3555,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __imatmul__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + # def __dlpack__( self: NDArray[number[Any]], /, @@ -3681,7 +3981,7 @@ bool_ = bool # NOTE: Because mypy has some long-standing bugs related to `__new__`, `object_` can't # be made generic. @final -class object_(_RealMixin, generic): +class object_(_RealMixin, generic[Any]): @overload def __new__(cls, nothing_to_see_here: None = None, /) -> None: ... # type: ignore[misc] @overload @@ -3695,6 +3995,9 @@ class object_(_RealMixin, generic): @overload # catch-all def __new__(cls, value: Any = ..., /) -> object | NDArray[Self]: ... # type: ignore[misc] def __init__(self, value: object = ..., /) -> None: ... + def __hash__(self, /) -> int: ... + def __abs__(self, /) -> object_: ... # this affects NDArray[object_].__abs__ + def __call__(self, /, *args: object, **kwargs: object) -> Any: ... if sys.version_info >= (3, 12): def __release_buffer__(self, buffer: memoryview, /) -> None: ... @@ -3840,6 +4143,9 @@ float32: TypeAlias = floating[_32Bit] # either a C `double`, `float`, or `longdouble` class float64(floating[_64Bit], float): # type: ignore[misc] + def __new__(cls, x: _ConvertibleToFloat | None = ..., /) -> Self: ... + + # @property def itemsize(self) -> L[8]: ... @property @@ -3974,7 +4280,15 @@ longdouble: TypeAlias = floating[_NBitLongDouble] # describing the two 64 bit floats representing its real and imaginary component class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): - def __init__(self, value: _ConvertibleToComplex | None = ..., /) -> None: ... + @overload + def __init__( + self, + real: complex | SupportsComplex | SupportsFloat | SupportsIndex = ..., + imag: complex | SupportsFloat | SupportsIndex = ..., + /, + ) -> None: ... + @overload + def __init__(self, real: _ConvertibleToComplex | None = ..., /) -> None: ... 
@property def real(self) -> floating[_NBit1]: ... # type: ignore[override] @@ -4058,6 +4372,17 @@ class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): complex64: TypeAlias = complexfloating[_32Bit, _32Bit] class complex128(complexfloating[_64Bit, _64Bit], complex): # type: ignore[misc] + @overload + def __new__( + cls, + real: complex | SupportsComplex | SupportsFloat | SupportsIndex = ..., + imag: complex | SupportsFloat | SupportsIndex = ..., + /, + ) -> Self: ... + @overload + def __new__(cls, real: _ConvertibleToComplex | None = ..., /) -> Self: ... + + # @property def itemsize(self) -> L[16]: ... @property @@ -4120,18 +4445,23 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __init__(self: timedelta64[None], value: _NaTValue | None, format: _TimeUnitSpec, /) -> None: ... @overload - def __init__(self: timedelta64[int], value: dt.timedelta, format: _TimeUnitSpec[_IntTimeUnit], /) -> None: ... + def __init__(self: timedelta64[L[0]], value: L[0], format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> None: ... @overload - def __init__(self: timedelta64[int], value: int, format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> None: ... + def __init__(self: timedelta64[int], value: _IntLike_co, format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> None: ... + @overload + def __init__(self: timedelta64[int], value: dt.timedelta, format: _TimeUnitSpec[_IntTimeUnit], /) -> None: ... @overload def __init__( self: timedelta64[dt.timedelta], - value: dt.timedelta | int, + value: dt.timedelta | _IntLike_co, format: _TimeUnitSpec[_NativeTD64Unit] = ..., /, ) -> None: ... @overload - def __init__(self, value: int | bytes | str | dt.timedelta | None, format: _TimeUnitSpec = ..., /) -> None: ... + def __init__(self, value: _ConvertibleToTD64, format: _TimeUnitSpec = ..., /) -> None: ... + + # inherited at runtime from `signedinteger` + def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... 
# NOTE: Only a limited number of units support conversion # to builtin scalar types: `Y`, `M`, `ns`, `ps`, `fs`, `as` @@ -4164,29 +4494,68 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] def __mul__(self, x: float | np.floating[Any] | np.integer[Any] | np.bool, /) -> timedelta64: ... __rmul__ = __mul__ + @overload + def __mod__(self, x: timedelta64[None | L[0]], /) -> timedelta64[None]: ... @overload def __mod__(self: timedelta64[None], x: timedelta64, /) -> timedelta64[None]: ... @overload + def __mod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... + @overload + def __mod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... + @overload def __mod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... @overload - def __mod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item]: ... + def __mod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... @overload - def __mod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... + def __mod__(self, x: timedelta64, /) -> timedelta64: ... + + # the L[0] makes __mod__ non-commutative, which the first two overloads reflect @overload - def __mod__(self, x: timedelta64[None], /) -> timedelta64[None]: ... + def __rmod__(self, x: timedelta64[None], /) -> timedelta64[None]: ... @overload - def __mod__(self, x: timedelta64[int], /) -> timedelta64[int]: ... + def __rmod__(self: timedelta64[None | L[0]], x: timedelta64, /) -> timedelta64[None]: ... @overload - def __mod__(self, x: timedelta64, /) -> timedelta64: ... - __rmod__ = __mod__ # at runtime the outcomes differ, but the type signatures are the same + def __rmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... 
+ @overload + def __rmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... + @overload + def __rmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... + @overload + def __rmod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... + @overload + def __rmod__(self, x: timedelta64, /) -> timedelta64: ... + # keep in sync with __mod__ + @overload + def __divmod__(self, x: timedelta64[None | L[0]], /) -> tuple[int64, timedelta64[None]]: ... @overload def __divmod__(self: timedelta64[None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... @overload + def __divmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload + def __divmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... + @overload def __divmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... @overload + def __divmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload def __divmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... - __rdivmod__ = __divmod__ + + # keep in sync with __rmod__ + @overload + def __rdivmod__(self, x: timedelta64[None], /) -> tuple[int64, timedelta64[None]]: ... + @overload + def __rdivmod__(self: timedelta64[None | L[0]], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... + @overload + def __rdivmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload + def __rdivmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... + @overload + def __rdivmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... 
+ @overload + def __rdivmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload + def __rdivmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... @overload def __sub__(self: timedelta64[None], b: _TD64Like_co, /) -> timedelta64[None]: ... @@ -4266,11 +4635,13 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __init__(self: datetime64[int], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_IntTimeUnit], /) -> None: ... @overload - def __init__(self: datetime64[dt.datetime], value: int | bytes | str, format: _TimeUnitSpec[_NativeTimeUnit], /) -> None: ... + def __init__( + self: datetime64[dt.datetime], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_NativeTimeUnit], / + ) -> None: ... @overload - def __init__(self: datetime64[dt.date], value: int | bytes | str, format: _TimeUnitSpec[_DateUnit], /) -> None: ... + def __init__(self: datetime64[dt.date], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_DateUnit], /) -> None: ... @overload - def __init__(self, value: bytes | str | None, format: _TimeUnitSpec = ..., /) -> None: ... + def __init__(self, value: bytes | str | dt.date | None, format: _TimeUnitSpec = ..., /) -> None: ... @overload def __add__(self: datetime64[_AnyDT64Item], x: int | integer[Any] | np.bool, /) -> datetime64[_AnyDT64Item]: ... @@ -4369,12 +4740,26 @@ class character(flexible[_CharacterItemT_co], Generic[_CharacterItemT_co]): class bytes_(character[bytes], bytes): @overload - def __init__(self, value: object = ..., /) -> None: ... + def __new__(cls, o: object = ..., /) -> Self: ... @overload - def __init__(self, value: str, /, encoding: str = ..., errors: str = ...) -> None: ... + def __new__(cls, s: str, /, encoding: str, errors: str = ...) -> Self: ... + + # + @overload + def __init__(self, o: object = ..., /) -> None: ... + @overload + def __init__(self, s: str, /, encoding: str, errors: str = ...) -> None: ... 
+ + # def __bytes__(self, /) -> bytes: ... class str_(character[str], str): + @overload + def __new__(cls, value: object = ..., /) -> Self: ... + @overload + def __new__(cls, value: bytes, /, encoding: str = ..., errors: str = ...) -> Self: ... + + # @overload def __init__(self, value: object = ..., /) -> None: ... @overload @@ -4386,6 +4771,8 @@ class ufunc: @property def __name__(self) -> LiteralString: ... @property + def __qualname__(self) -> LiteralString: ... + @property def __doc__(self) -> str: ... @property def nin(self) -> int: ... @@ -4426,6 +4813,17 @@ class ufunc: # outputs, so we can't type it very precisely. def at(self, /, *args: Any, **kwargs: Any) -> None: ... + # + def resolve_dtypes( + self, + /, + dtypes: tuple[dtype[Any] | type | None, ...], + *, + signature: tuple[dtype[Any] | None, ...] | None = None, + casting: _CastingKind | None = None, + reduction: builtins.bool = False, + ) -> tuple[dtype[Any], ...]: ... + # Parameters: `__name__`, `ntypes` and `identity` absolute: _UFunc_Nin1_Nout1[L['absolute'], L[20], None] add: _UFunc_Nin2_Nout1[L['add'], L[22], L[0]] @@ -4490,6 +4888,7 @@ logical_not: _UFunc_Nin1_Nout1[L['logical_not'], L[20], None] logical_or: _UFunc_Nin2_Nout1[L['logical_or'], L[20], L[False]] logical_xor: _UFunc_Nin2_Nout1[L['logical_xor'], L[19], L[False]] matmul: _GUFunc_Nin2_Nout1[L['matmul'], L[19], None, L["(n?,k),(k,m?)->(n?,m?)"]] +matvec: _GUFunc_Nin2_Nout1[L['matvec'], L[19], None, L["(m,n),(n)->(m)"]] maximum: _UFunc_Nin2_Nout1[L['maximum'], L[21], None] minimum: _UFunc_Nin2_Nout1[L['minimum'], L[21], None] mod: _UFunc_Nin2_Nout1[L['remainder'], L[16], None] @@ -4519,6 +4918,7 @@ tanh: _UFunc_Nin1_Nout1[L['tanh'], L[8], None] true_divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] trunc: _UFunc_Nin1_Nout1[L['trunc'], L[7], None] vecdot: _GUFunc_Nin2_Nout1[L['vecdot'], L[19], None, L["(n),(n)->()"]] +vecmat: _GUFunc_Nin2_Nout1[L['vecmat'], L[19], None, L["(n),(n,m)->(m)"]] abs = absolute acos = arccos @@ -4535,11 
+4935,6 @@ bitwise_right_shift = right_shift permute_dims = transpose pow = power -class _CopyMode(enum.Enum): - ALWAYS: L[True] - IF_NEEDED: L[False] - NEVER: L[2] - class errstate: def __init__( self, @@ -4561,50 +4956,6 @@ class errstate: ) -> None: ... def __call__(self, func: _CallableT) -> _CallableT: ... -class ndenumerate(Generic[_SCT_co]): - @property - def iter(self) -> flatiter[NDArray[_SCT_co]]: ... - - @overload - def __new__( - cls, arr: _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]], - ) -> ndenumerate[_SCT]: ... - @overload - def __new__(cls, arr: str | _NestedSequence[str]) -> ndenumerate[str_]: ... - @overload - def __new__(cls, arr: bytes | _NestedSequence[bytes]) -> ndenumerate[bytes_]: ... - @overload - def __new__(cls, arr: builtins.bool | _NestedSequence[builtins.bool]) -> ndenumerate[np.bool]: ... - @overload - def __new__(cls, arr: int | _NestedSequence[int]) -> ndenumerate[int_]: ... - @overload - def __new__(cls, arr: float | _NestedSequence[float]) -> ndenumerate[float64]: ... - @overload - def __new__(cls, arr: complex | _NestedSequence[complex]) -> ndenumerate[complex128]: ... - @overload - def __new__(cls, arr: object) -> ndenumerate[object_]: ... - - # The first overload is a (semi-)workaround for a mypy bug (tested with v1.10 and v1.11) - @overload - def __next__( - self: ndenumerate[np.bool | datetime64 | timedelta64 | number[Any] | flexible], - /, - ) -> tuple[_Shape, _SCT_co]: ... - @overload - def __next__(self: ndenumerate[object_], /) -> tuple[_Shape, Any]: ... - @overload - def __next__(self, /) -> tuple[_Shape, _SCT_co]: ... - - def __iter__(self) -> Self: ... - -class ndindex: - @overload - def __init__(self, shape: tuple[SupportsIndex, ...], /) -> None: ... - @overload - def __init__(self, *shape: SupportsIndex) -> None: ... - def __iter__(self) -> Self: ... - def __next__(self) -> _Shape: ... 
- # TODO: The type of each `__next__` and `iters` return-type depends # on the length and dtype of `args`; we can't describe this behavior yet # as we lack variadics (PEP 646). @@ -4699,7 +5050,7 @@ class iinfo(Generic[_IntegerT_co]): class nditer: def __new__( cls, - op: ArrayLike | Sequence[ArrayLike], + op: ArrayLike | Sequence[ArrayLike | None], flags: None | Sequence[_NDIterFlagsKind] = ..., op_flags: None | Sequence[Sequence[_NDIterFlagsOp]] = ..., op_dtypes: DTypeLike | Sequence[DTypeLike] = ..., @@ -5038,7 +5389,7 @@ class matrix(ndarray[_2DShapeT_co, _DType_co]): def ptp(self, axis: None | _ShapeLike = ..., out: _ArrayT = ...) -> _ArrayT: ... def squeeze(self, axis: None | _ShapeLike = ...) -> matrix[_2D, _DType_co]: ... - def tolist(self: _SupportsItem[_T]) -> list[list[_T]]: ... + def tolist(self: matrix[Any, dtype[generic[_T]]]) -> list[list[_T]]: ... # pyright: ignore[reportIncompatibleMethodOverride] def ravel(self, /, order: _OrderKACF = "C") -> matrix[tuple[L[1], int], _DType_co]: ... # pyright: ignore[reportIncompatibleMethodOverride] def flatten(self, /, order: _OrderKACF = "C") -> matrix[tuple[L[1], int], _DType_co]: ... # pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/_configtool.pyi b/numpy/_configtool.pyi new file mode 100644 index 000000000000..7e7363e797f3 --- /dev/null +++ b/numpy/_configtool.pyi @@ -0,0 +1 @@ +def main() -> None: ... diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 98a94973383a..d860aadedd83 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -1663,8 +1663,8 @@ from_dlpack(x, /, *, device=None, copy=None) Create a NumPy array from an object implementing the ``__dlpack__`` - protocol. Generally, the returned NumPy array is a read-only view - of the input object. See [1]_ and [2]_ for more details. + protocol. Generally, the returned NumPy array is a view of the input + object. See [1]_ and [2]_ for more details. 
Parameters ---------- diff --git a/numpy/_core/_add_newdocs.pyi b/numpy/_core/_add_newdocs.pyi new file mode 100644 index 000000000000..b23c3b1adedd --- /dev/null +++ b/numpy/_core/_add_newdocs.pyi @@ -0,0 +1,3 @@ +from .overrides import get_array_function_like_doc as get_array_function_like_doc + +def refer_to_array_attribute(attr: str, method: bool = True) -> tuple[str, str]: ... diff --git a/numpy/_core/_add_newdocs_scalars.pyi b/numpy/_core/_add_newdocs_scalars.pyi new file mode 100644 index 000000000000..4a06c9b07d74 --- /dev/null +++ b/numpy/_core/_add_newdocs_scalars.pyi @@ -0,0 +1,16 @@ +from collections.abc import Iterable +from typing import Final + +import numpy as np + +possible_aliases: Final[list[tuple[type[np.number], str, str]]] = ... +_system: Final[str] = ... +_machine: Final[str] = ... +_doc_alias_string: Final[str] = ... +_bool_docstring: Final[str] = ... +int_name: str = ... +float_name: str = ... + +def numeric_type_aliases(aliases: list[tuple[str, str]]) -> list[tuple[type[np.number], str, str]]: ... +def add_newdoc_for_scalar_type(obj: str, fixed_aliases: Iterable[str], doc: str) -> None: ... +def _get_platform_and_machine() -> tuple[str, str]: ... 
diff --git a/numpy/_core/_dtype.pyi b/numpy/_core/_dtype.pyi new file mode 100644 index 000000000000..c3e966e3f517 --- /dev/null +++ b/numpy/_core/_dtype.pyi @@ -0,0 +1,58 @@ +from typing import Any, Final, TypeAlias, TypedDict, overload, type_check_only +from typing import Literal as L + +from typing_extensions import ReadOnly, TypeVar + +import numpy as np + +### + +_T = TypeVar("_T") + +_Name: TypeAlias = L["uint", "int", "complex", "float", "bool", "void", "object", "datetime", "timedelta", "bytes", "str"] + +@type_check_only +class _KindToStemType(TypedDict): + u: ReadOnly[L["uint"]] + i: ReadOnly[L["int"]] + c: ReadOnly[L["complex"]] + f: ReadOnly[L["float"]] + b: ReadOnly[L["bool"]] + V: ReadOnly[L["void"]] + O: ReadOnly[L["object"]] + M: ReadOnly[L["datetime"]] + m: ReadOnly[L["timedelta"]] + S: ReadOnly[L["bytes"]] + U: ReadOnly[L["str"]] + +### + +_kind_to_stem: Final[_KindToStemType] = ... + +# +def _kind_name(dtype: np.dtype[Any]) -> _Name: ... +def __str__(dtype: np.dtype[Any]) -> str: ... +def __repr__(dtype: np.dtype[Any]) -> str: ... + +# +def _isunsized(dtype: np.dtype[Any]) -> bool: ... +def _is_packed(dtype: np.dtype[Any]) -> bool: ... +def _name_includes_bit_suffix(dtype: np.dtype[Any]) -> bool: ... + +# +def _construction_repr(dtype: np.dtype[Any], include_align: bool = False, short: bool = False) -> str: ... +def _scalar_str(dtype: np.dtype[Any], short: bool) -> str: ... +def _byte_order_str(dtype: np.dtype[Any]) -> str: ... +def _datetime_metadata_str(dtype: np.dtype[Any]) -> str: ... +def _struct_dict_str(dtype: np.dtype[Any], includealignedflag: bool) -> str: ... +def _struct_list_str(dtype: np.dtype[Any]) -> str: ... +def _struct_str(dtype: np.dtype[Any], include_align: bool) -> str: ... +def _subarray_str(dtype: np.dtype[Any]) -> str: ... +def _name_get(dtype: np.dtype[Any]) -> str: ... + +# +@overload +def _unpack_field(dtype: np.dtype[Any], offset: int, title: _T) -> tuple[np.dtype[Any], int, _T]: ... 
+@overload +def _unpack_field(dtype: np.dtype[Any], offset: int, title: None = None) -> tuple[np.dtype[Any], int, None]: ... +def _aligned_offset(offset: int, alignment: int) -> int: ... diff --git a/numpy/_core/_dtype_ctypes.pyi b/numpy/_core/_dtype_ctypes.pyi new file mode 100644 index 000000000000..69438a2c1b4c --- /dev/null +++ b/numpy/_core/_dtype_ctypes.pyi @@ -0,0 +1,83 @@ +import _ctypes +import ctypes as ct +from typing import Any, overload + +import numpy as np + +# +@overload +def dtype_from_ctypes_type(t: type[_ctypes.Array[Any] | _ctypes.Structure]) -> np.dtype[np.void]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_bool]) -> np.dtype[np.bool]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_int8 | ct.c_byte]) -> np.dtype[np.int8]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_uint8 | ct.c_ubyte]) -> np.dtype[np.uint8]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_int16 | ct.c_short]) -> np.dtype[np.int16]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_uint16 | ct.c_ushort]) -> np.dtype[np.uint16]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_int32 | ct.c_int]) -> np.dtype[np.int32]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_uint32 | ct.c_uint]) -> np.dtype[np.uint32]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_ssize_t | ct.c_long]) -> np.dtype[np.int32 | np.int64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_size_t | ct.c_ulong]) -> np.dtype[np.uint32 | np.uint64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_int64 | ct.c_longlong]) -> np.dtype[np.int64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_uint64 | ct.c_ulonglong]) -> np.dtype[np.uint64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_float]) -> np.dtype[np.float32]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_double]) -> np.dtype[np.float64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_longdouble]) -> np.dtype[np.longdouble]: ... 
+@overload +def dtype_from_ctypes_type(t: type[ct.c_char]) -> np.dtype[np.bytes_]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.py_object[Any]]) -> np.dtype[np.object_]: ... + +# NOTE: the complex ctypes on python>=3.14 are not yet supported at runtim, see +# https://github.com/numpy/numpy/issues/28360 + +# +def _from_ctypes_array(t: type[_ctypes.Array[Any]]) -> np.dtype[np.void]: ... +def _from_ctypes_structure(t: type[_ctypes.Structure]) -> np.dtype[np.void]: ... +def _from_ctypes_union(t: type[_ctypes.Union]) -> np.dtype[np.void]: ... + +# keep in sync with `dtype_from_ctypes_type` (minus the first overload) +@overload +def _from_ctypes_scalar(t: type[ct.c_bool]) -> np.dtype[np.bool]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_int8 | ct.c_byte]) -> np.dtype[np.int8]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_uint8 | ct.c_ubyte]) -> np.dtype[np.uint8]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_int16 | ct.c_short]) -> np.dtype[np.int16]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_uint16 | ct.c_ushort]) -> np.dtype[np.uint16]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_int32 | ct.c_int]) -> np.dtype[np.int32]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_uint32 | ct.c_uint]) -> np.dtype[np.uint32]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_ssize_t | ct.c_long]) -> np.dtype[np.int32 | np.int64]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_size_t | ct.c_ulong]) -> np.dtype[np.uint32 | np.uint64]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_int64 | ct.c_longlong]) -> np.dtype[np.int64]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_uint64 | ct.c_ulonglong]) -> np.dtype[np.uint64]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_float]) -> np.dtype[np.float32]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_double]) -> np.dtype[np.float64]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_longdouble]) -> np.dtype[np.longdouble]: ... 
+@overload +def _from_ctypes_scalar(t: type[ct.c_char]) -> np.dtype[np.bytes_]: ... +@overload +def _from_ctypes_scalar(t: type[ct.py_object[Any]]) -> np.dtype[np.object_]: ... diff --git a/numpy/_core/_exceptions.pyi b/numpy/_core/_exceptions.pyi new file mode 100644 index 000000000000..5abfc779c212 --- /dev/null +++ b/numpy/_core/_exceptions.pyi @@ -0,0 +1,73 @@ +from collections.abc import Iterable +from typing import Any, Final, overload + +from typing_extensions import TypeVar, Unpack + +import numpy as np +from numpy import _CastingKind +from numpy._utils import set_module as set_module + +### + +_T = TypeVar("_T") +_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, Unpack[tuple[Any, ...]]]) +_ExceptionT = TypeVar("_ExceptionT", bound=Exception) + +### + +class UFuncTypeError(TypeError): + ufunc: Final[np.ufunc] + def __init__(self, /, ufunc: np.ufunc) -> None: ... + +class _UFuncNoLoopError(UFuncTypeError): + dtypes: tuple[np.dtype[Any], ...] + def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype[Any]]) -> None: ... + +class _UFuncBinaryResolutionError(_UFuncNoLoopError): + dtypes: tuple[np.dtype[Any], np.dtype[Any]] + def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype[Any]]) -> None: ... + +class _UFuncCastingError(UFuncTypeError): + casting: Final[_CastingKind] + from_: Final[np.dtype[Any]] + to: Final[np.dtype[Any]] + def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype[Any], to: np.dtype[Any]) -> None: ... + +class _UFuncInputCastingError(_UFuncCastingError): + in_i: Final[int] + def __init__( + self, + /, + ufunc: np.ufunc, + casting: _CastingKind, + from_: np.dtype[Any], + to: np.dtype[Any], + i: int, + ) -> None: ... + +class _UFuncOutputCastingError(_UFuncCastingError): + out_i: Final[int] + def __init__( + self, + /, + ufunc: np.ufunc, + casting: _CastingKind, + from_: np.dtype[Any], + to: np.dtype[Any], + i: int, + ) -> None: ... 
+ +class _ArrayMemoryError(MemoryError): + shape: tuple[int, ...] + dtype: np.dtype[Any] + def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype[Any]) -> None: ... + @property + def _total_size(self) -> int: ... + @staticmethod + def _size_to_string(num_bytes: int) -> str: ... + +@overload +def _unpack_tuple(tup: tuple[_T]) -> _T: ... +@overload +def _unpack_tuple(tup: _TupleT) -> _TupleT: ... +def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ... diff --git a/numpy/_core/_internal.pyi b/numpy/_core/_internal.pyi index 690554f66f94..15726fe3064e 100644 --- a/numpy/_core/_internal.pyi +++ b/numpy/_core/_internal.pyi @@ -1,23 +1,41 @@ -from typing import Any, TypeVar, overload, Generic import ctypes as ct +import re +from collections.abc import Callable, Iterable +from typing import Any, Final, Generic, overload -from numpy.typing import NDArray +from typing_extensions import Self, TypeVar, deprecated + +import numpy as np +import numpy.typing as npt from numpy.ctypeslib import c_intp -_CastT = TypeVar("_CastT", bound=ct._CanCastTo) # Copied from `ctypes.cast` +_CastT = TypeVar("_CastT", bound=ct._CanCastTo) +_T_co = TypeVar("_T_co", covariant=True) _CT = TypeVar("_CT", bound=ct._CData) -_PT = TypeVar("_PT", bound=int) +_PT_co = TypeVar("_PT_co", bound=int | None, default=None, covariant=True) + +### + +IS_PYPY: Final[bool] = ... + +format_re: Final[re.Pattern[str]] = ... +sep_re: Final[re.Pattern[str]] = ... +space_re: Final[re.Pattern[str]] = ... + +### # TODO: Let the likes of `shape_as` and `strides_as` return `None` # for 0D arrays once we've got shape-support -class _ctypes(Generic[_PT]): +class _ctypes(Generic[_PT_co]): @overload - def __new__(cls, array: NDArray[Any], ptr: None = ...) -> _ctypes[None]: ... + def __init__(self: _ctypes[None], /, array: npt.NDArray[Any], ptr: None = None) -> None: ... @overload - def __new__(cls, array: NDArray[Any], ptr: _PT) -> _ctypes[_PT]: ... 
+ def __init__(self, /, array: npt.NDArray[Any], ptr: _PT_co) -> None: ... + + # @property - def data(self) -> _PT: ... + def data(self) -> _PT_co: ... @property def shape(self) -> ct.Array[c_intp]: ... @property @@ -25,6 +43,30 @@ class _ctypes(Generic[_PT]): @property def _as_parameter_(self) -> ct.c_void_p: ... - def data_as(self, obj: type[_CastT]) -> _CastT: ... - def shape_as(self, obj: type[_CT]) -> ct.Array[_CT]: ... - def strides_as(self, obj: type[_CT]) -> ct.Array[_CT]: ... + # + def data_as(self, /, obj: type[_CastT]) -> _CastT: ... + def shape_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ... + def strides_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ... + + # + @deprecated('"get_data" is deprecated. Use "data" instead') + def get_data(self, /) -> _PT_co: ... + @deprecated('"get_shape" is deprecated. Use "shape" instead') + def get_shape(self, /) -> ct.Array[c_intp]: ... + @deprecated('"get_strides" is deprecated. Use "strides" instead') + def get_strides(self, /) -> ct.Array[c_intp]: ... + @deprecated('"get_as_parameter" is deprecated. Use "_as_parameter_" instead') + def get_as_parameter(self, /) -> ct.c_void_p: ... + +class dummy_ctype(Generic[_T_co]): + _cls: type[_T_co] + + def __init__(self, /, cls: type[_T_co]) -> None: ... + def __eq__(self, other: Self, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __ne__(self, other: Self, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __mul__(self, other: object, /) -> Self: ... + def __call__(self, /, *other: object) -> _T_co: ... + +def array_ufunc_errmsg_formatter(dummy: object, ufunc: np.ufunc, method: str, *inputs: object, **kwargs: object) -> str: ... +def array_function_errmsg_formatter(public_api: Callable[..., object], types: Iterable[str]) -> str: ... +def npy_ctypes_check(cls: type) -> bool: ... 
diff --git a/numpy/_core/_machar.pyi b/numpy/_core/_machar.pyi new file mode 100644 index 000000000000..5abfc779c212 --- /dev/null +++ b/numpy/_core/_machar.pyi @@ -0,0 +1,73 @@ +from collections.abc import Iterable +from typing import Any, Final, overload + +from typing_extensions import TypeVar, Unpack + +import numpy as np +from numpy import _CastingKind +from numpy._utils import set_module as set_module + +### + +_T = TypeVar("_T") +_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, Unpack[tuple[Any, ...]]]) +_ExceptionT = TypeVar("_ExceptionT", bound=Exception) + +### + +class UFuncTypeError(TypeError): + ufunc: Final[np.ufunc] + def __init__(self, /, ufunc: np.ufunc) -> None: ... + +class _UFuncNoLoopError(UFuncTypeError): + dtypes: tuple[np.dtype[Any], ...] + def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype[Any]]) -> None: ... + +class _UFuncBinaryResolutionError(_UFuncNoLoopError): + dtypes: tuple[np.dtype[Any], np.dtype[Any]] + def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype[Any]]) -> None: ... + +class _UFuncCastingError(UFuncTypeError): + casting: Final[_CastingKind] + from_: Final[np.dtype[Any]] + to: Final[np.dtype[Any]] + def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype[Any], to: np.dtype[Any]) -> None: ... + +class _UFuncInputCastingError(_UFuncCastingError): + in_i: Final[int] + def __init__( + self, + /, + ufunc: np.ufunc, + casting: _CastingKind, + from_: np.dtype[Any], + to: np.dtype[Any], + i: int, + ) -> None: ... + +class _UFuncOutputCastingError(_UFuncCastingError): + out_i: Final[int] + def __init__( + self, + /, + ufunc: np.ufunc, + casting: _CastingKind, + from_: np.dtype[Any], + to: np.dtype[Any], + i: int, + ) -> None: ... + +class _ArrayMemoryError(MemoryError): + shape: tuple[int, ...] + dtype: np.dtype[Any] + def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype[Any]) -> None: ... + @property + def _total_size(self) -> int: ... 
+ @staticmethod + def _size_to_string(num_bytes: int) -> str: ... + +@overload +def _unpack_tuple(tup: tuple[_T]) -> _T: ... +@overload +def _unpack_tuple(tup: _TupleT) -> _TupleT: ... +def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ... diff --git a/numpy/_core/_methods.pyi b/numpy/_core/_methods.pyi new file mode 100644 index 000000000000..45e2b8b9f761 --- /dev/null +++ b/numpy/_core/_methods.pyi @@ -0,0 +1,24 @@ +from collections.abc import Callable +from typing import Any, TypeAlias + +from typing_extensions import Concatenate + +import numpy as np + +from . import _exceptions as _exceptions + +### + +_Reduce2: TypeAlias = Callable[Concatenate[object, ...], Any] + +### + +bool_dt: np.dtype[np.bool] = ... +umr_maximum: _Reduce2 = ... +umr_minimum: _Reduce2 = ... +umr_sum: _Reduce2 = ... +umr_prod: _Reduce2 = ... +umr_bitwise_count = np.bitwise_count +umr_any: _Reduce2 = ... +umr_all: _Reduce2 = ... +_complex_to_float: dict[np.dtype[np.complexfloating], np.dtype[np.floating]] = ... diff --git a/numpy/_core/_simd.pyi b/numpy/_core/_simd.pyi new file mode 100644 index 000000000000..70bb7077797e --- /dev/null +++ b/numpy/_core/_simd.pyi @@ -0,0 +1,25 @@ +from types import ModuleType +from typing import TypedDict, type_check_only + +# NOTE: these 5 are only defined on systems with an intel processor +SSE42: ModuleType | None = ... +FMA3: ModuleType | None = ... +AVX2: ModuleType | None = ... +AVX512F: ModuleType | None = ... +AVX512_SKX: ModuleType | None = ... + +baseline: ModuleType | None = ... + +@type_check_only +class SimdTargets(TypedDict): + SSE42: ModuleType | None + AVX2: ModuleType | None + FMA3: ModuleType | None + AVX512F: ModuleType | None + AVX512_SKX: ModuleType | None + baseline: ModuleType | None + +targets: SimdTargets = ... + +def clear_floatstatus() -> None: ... +def get_floatstatus() -> int: ... 
diff --git a/numpy/_core/_string_helpers.pyi b/numpy/_core/_string_helpers.pyi new file mode 100644 index 000000000000..6a85832b7a93 --- /dev/null +++ b/numpy/_core/_string_helpers.pyi @@ -0,0 +1,12 @@ +from typing import Final + +_all_chars: Final[tuple[str, ...]] = ... +_ascii_upper: Final[tuple[str, ...]] = ... +_ascii_lower: Final[tuple[str, ...]] = ... + +LOWER_TABLE: Final[tuple[str, ...]] = ... +UPPER_TABLE: Final[tuple[str, ...]] = ... + +def english_lower(s: str) -> str: ... +def english_upper(s: str) -> str: ... +def english_capitalize(s: str) -> str: ... diff --git a/numpy/_core/arrayprint.pyi b/numpy/_core/arrayprint.pyi index 10728131ba3f..1f8be64d5e7b 100644 --- a/numpy/_core/arrayprint.pyi +++ b/numpy/_core/arrayprint.pyi @@ -1,42 +1,53 @@ from collections.abc import Callable -from typing import Any, Literal, TypeAlias, TypedDict, SupportsIndex, type_check_only # Using a private class is by no means ideal, but it is simply a consequence # of a `contextlib.context` returning an instance of aforementioned class from contextlib import _GeneratorContextManager +from typing import Any, Final, Literal, SupportsIndex, TypeAlias, TypedDict, overload, type_check_only + +from typing_extensions import deprecated import numpy as np -from numpy import ( - integer, - timedelta64, - datetime64, - floating, - complexfloating, - void, - longdouble, - clongdouble, -) +from numpy._globals import _NoValueType from numpy._typing import NDArray, _CharLike_co, _FloatLike_co +__all__ = [ + "array2string", + "array_repr", + "array_str", + "format_float_positional", + "format_float_scientific", + "get_printoptions", + "printoptions", + "set_printoptions", +] + +### + _FloatMode: TypeAlias = Literal["fixed", "unique", "maxprec", "maxprec_equal"] +_LegacyNoStyle: TypeAlias = Literal["1.21", "1.25", "2.1", False] +_Legacy: TypeAlias = Literal["1.13", _LegacyNoStyle] +_Sign: TypeAlias = Literal["-", "+", " "] +_Trim: TypeAlias = Literal["k", ".", "0", "-"] +_ReprFunc: TypeAlias = 
Callable[[NDArray[Any]], str] @type_check_only class _FormatDict(TypedDict, total=False): bool: Callable[[np.bool], str] - int: Callable[[integer[Any]], str] - timedelta: Callable[[timedelta64], str] - datetime: Callable[[datetime64], str] - float: Callable[[floating[Any]], str] - longfloat: Callable[[longdouble], str] - complexfloat: Callable[[complexfloating[Any, Any]], str] - longcomplexfloat: Callable[[clongdouble], str] - void: Callable[[void], str] + int: Callable[[np.integer], str] + timedelta: Callable[[np.timedelta64], str] + datetime: Callable[[np.datetime64], str] + float: Callable[[np.floating], str] + longfloat: Callable[[np.longdouble], str] + complexfloat: Callable[[np.complexfloating], str] + longcomplexfloat: Callable[[np.clongdouble], str] + void: Callable[[np.void], str] numpystr: Callable[[_CharLike_co], str] object: Callable[[object], str] all: Callable[[object], str] - int_kind: Callable[[integer[Any]], str] - float_kind: Callable[[floating[Any]], str] - complex_kind: Callable[[complexfloating[Any, Any]], str] + int_kind: Callable[[np.integer], str] + float_kind: Callable[[np.floating], str] + complex_kind: Callable[[np.complexfloating], str] str_kind: Callable[[_CharLike_co], str] @type_check_only @@ -48,10 +59,14 @@ class _FormatOptions(TypedDict): suppress: bool nanstr: str infstr: str - formatter: None | _FormatDict - sign: Literal["-", "+", " "] + formatter: _FormatDict | None + sign: _Sign floatmode: _FloatMode - legacy: Literal[False, "1.13", "1.21"] + legacy: _Legacy + +### + +__docformat__: Final = "restructuredtext" # undocumented def set_printoptions( precision: None | SupportsIndex = ..., @@ -62,37 +77,113 @@ def set_printoptions( nanstr: None | str = ..., infstr: None | str = ..., formatter: None | _FormatDict = ..., - sign: Literal[None, "-", "+", " "] = ..., - floatmode: None | _FloatMode = ..., + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, *, - legacy: Literal[None, False, "1.13", "1.21"] = ..., - 
override_repr: None | Callable[[NDArray[Any]], str] = ..., + legacy: _Legacy | None = None, + override_repr: _ReprFunc | None = None, ) -> None: ... def get_printoptions() -> _FormatOptions: ... + +# public numpy export +@overload # no style def array2string( a: NDArray[Any], - max_line_width: None | int = ..., - precision: None | SupportsIndex = ..., - suppress_small: None | bool = ..., - separator: str = ..., - prefix: str = ..., - # NOTE: With the `style` argument being deprecated, - # all arguments between `formatter` and `suffix` are de facto - # keyworld-only arguments + max_line_width: int | None = None, + precision: SupportsIndex | None = None, + suppress_small: bool | None = None, + separator: str = " ", + prefix: str = "", + style: _NoValueType = ..., + formatter: _FormatDict | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + suffix: str = "", *, - formatter: None | _FormatDict = ..., - threshold: None | int = ..., - edgeitems: None | int = ..., - sign: Literal[None, "-", "+", " "] = ..., - floatmode: None | _FloatMode = ..., - suffix: str = ..., - legacy: Literal[None, False, "1.13", "1.21"] = ..., + legacy: _Legacy | None = None, ) -> str: ... +@overload # style= (positional), legacy="1.13" +def array2string( + a: NDArray[Any], + max_line_width: int | None, + precision: SupportsIndex | None, + suppress_small: bool | None, + separator: str, + prefix: str, + style: _ReprFunc, + formatter: _FormatDict | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + suffix: str = "", + *, + legacy: Literal["1.13"], +) -> str: ... 
+@overload # style= (keyword), legacy="1.13" +def array2string( + a: NDArray[Any], + max_line_width: int | None = None, + precision: SupportsIndex | None = None, + suppress_small: bool | None = None, + separator: str = " ", + prefix: str = "", + *, + style: _ReprFunc, + formatter: _FormatDict | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + suffix: str = "", + legacy: Literal["1.13"], +) -> str: ... +@overload # style= (positional), legacy!="1.13" +@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode") +def array2string( + a: NDArray[Any], + max_line_width: int | None, + precision: SupportsIndex | None, + suppress_small: bool | None, + separator: str, + prefix: str, + style: _ReprFunc, + formatter: _FormatDict | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + suffix: str = "", + *, + legacy: _LegacyNoStyle | None = None, +) -> str: ... +@overload # style= (keyword), legacy="1.13" +@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode") +def array2string( + a: NDArray[Any], + max_line_width: int | None = None, + precision: SupportsIndex | None = None, + suppress_small: bool | None = None, + separator: str = " ", + prefix: str = "", + *, + style: _ReprFunc, + formatter: _FormatDict | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + suffix: str = "", + legacy: _LegacyNoStyle | None = None, +) -> str: ... 
+ def format_float_scientific( x: _FloatLike_co, precision: None | int = ..., unique: bool = ..., - trim: Literal["k", ".", "0", "-"] = ..., + trim: _Trim = "k", sign: bool = ..., pad_left: None | int = ..., exp_digits: None | int = ..., @@ -103,7 +194,7 @@ def format_float_positional( precision: None | int = ..., unique: bool = ..., fractional: bool = ..., - trim: Literal["k", ".", "0", "-"] = ..., + trim: _Trim = "k", sign: bool = ..., pad_left: None | int = ..., pad_right: None | int = ..., @@ -130,8 +221,9 @@ def printoptions( nanstr: None | str = ..., infstr: None | str = ..., formatter: None | _FormatDict = ..., - sign: Literal[None, "-", "+", " "] = ..., - floatmode: None | _FloatMode = ..., + sign: None | _Sign = None, + floatmode: _FloatMode | None = None, *, - legacy: Literal[None, False, "1.13", "1.21"] = ... + legacy: _Legacy | None = None, + override_repr: _ReprFunc | None = None, ) -> _GeneratorContextManager[_FormatOptions]: ... diff --git a/numpy/_core/code_generators/genapi.py b/numpy/_core/code_generators/genapi.py index da2f8f636e59..3eb03b208ab6 100644 --- a/numpy/_core/code_generators/genapi.py +++ b/numpy/_core/code_generators/genapi.py @@ -85,7 +85,7 @@ def get_processor(): join('multiarray', 'stringdtype', 'static_string.c'), join('multiarray', 'strfuncs.c'), join('multiarray', 'usertypes.c'), - join('umath', 'dispatching.c'), + join('umath', 'dispatching.cpp'), join('umath', 'extobj.c'), join('umath', 'loops.c.src'), join('umath', 'reduction.c'), diff --git a/numpy/_core/code_generators/generate_umath.py b/numpy/_core/code_generators/generate_umath.py index e5e7d1b76523..c810de1aec5f 100644 --- a/numpy/_core/code_generators/generate_umath.py +++ b/numpy/_core/code_generators/generate_umath.py @@ -1152,6 +1152,22 @@ def english_upper(s): TD(O), signature='(n),(n)->()', ), +'matvec': + Ufunc(2, 1, None, + docstrings.get('numpy._core.umath.matvec'), + "PyUFunc_SimpleUniformOperationTypeResolver", + TD(notimes_or_obj), + TD(O), + 
signature='(m,n),(n)->(m)', + ), +'vecmat': + Ufunc(2, 1, None, + docstrings.get('numpy._core.umath.vecmat'), + "PyUFunc_SimpleUniformOperationTypeResolver", + TD(notimes_or_obj), + TD(O), + signature='(n),(n,m)->(m)', + ), 'str_len': Ufunc(1, 1, Zero, docstrings.get('numpy._core.umath.str_len'), @@ -1576,13 +1592,10 @@ def make_code(funcdict, filename): #include "matmul.h" #include "clip.h" #include "dtypemeta.h" + #include "dispatching.h" #include "_umath_doc_generated.h" %s - /* Returns a borrowed ref of the second value in the matching info tuple */ - PyObject * - get_info_no_cast(PyUFuncObject *ufunc, PyArray_DTypeMeta *op_dtype, - int ndtypes); static int InitOperators(PyObject *dictionary) { diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index f17a1221b371..c9ef4b8d533b 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -44,7 +44,7 @@ def add_newdoc(place, name, doc): skip = ( # gufuncs do not use the OUT_SCALAR replacement strings - 'matmul', 'vecdot', + 'matmul', 'vecdot', 'matvec', 'vecmat', # clip has 3 inputs, which is not handled by this 'clip', ) @@ -2793,7 +2793,9 @@ def add_newdoc(place, name, doc): See Also -------- - vdot : Complex-conjugating dot product. + vecdot : Complex-conjugating dot product for stacks of vectors. + matvec : Matrix-vector product for stacks of matrices and vectors. + vecmat : Vector-matrix product for stacks of vectors and matrices. tensordot : Sum products over arbitrary axes. einsum : Einstein summation convention. dot : alternative matrix product with different broadcasting rules. @@ -2808,10 +2810,10 @@ def add_newdoc(place, name, doc): matrices residing in the last two indexes and broadcast accordingly. - If the first argument is 1-D, it is promoted to a matrix by prepending a 1 to its dimensions. After matrix multiplication - the prepended 1 is removed. + the prepended 1 is removed. 
(For stacks of vectors, use ``vecmat``.) - If the second argument is 1-D, it is promoted to a matrix by appending a 1 to its dimensions. After matrix multiplication - the appended 1 is removed. + the appended 1 is removed. (For stacks of vectors, use ``matvec``.) ``matmul`` differs from ``dot`` in two important ways: @@ -2904,14 +2906,16 @@ def add_newdoc(place, name, doc): where :math:`\\overline{a_i}` denotes the complex conjugate if :math:`a_i` is complex and the identity otherwise. + .. versionadded:: 2.0.0 + Parameters ---------- x1, x2 : array_like Input arrays, scalars not allowed. out : ndarray, optional A location into which the result is stored. If provided, it must have - a shape that the broadcasted shape of `x1` and `x2` with the last axis - removed. If not provided or None, a freshly-allocated array is used. + the broadcasted shape of `x1` and `x2` with the last axis removed. + If not provided or None, a freshly-allocated array is used. **kwargs For other keyword-only arguments, see the :ref:`ufunc docs `. @@ -2933,6 +2937,9 @@ def add_newdoc(place, name, doc): See Also -------- vdot : same but flattens arguments first + matmul : Matrix-matrix product. + vecmat : Vector-matrix product. + matvec : Matrix-vector product. einsum : Einstein summation convention. Examples @@ -2946,7 +2953,137 @@ def add_newdoc(place, name, doc): >>> np.vecdot(v, n) array([ 3., 8., 10.]) - .. versionadded:: 2.0.0 + """) + +add_newdoc('numpy._core.umath', 'matvec', + """ + Matrix-vector dot product of two arrays. + + Given a matrix (or stack of matrices) :math:`\\mathbf{A}` in ``x1`` and + a vector (or stack of vectors) :math:`\\mathbf{v}` in ``x2``, the + matrix-vector product is defined as: + + .. math:: + \\mathbf{A} \\cdot \\mathbf{v} = \\sum_{j=0}^{n-1} A_{ij} v_j + + where the sum is over the last dimensions in ``x1`` and ``x2`` + (unless ``axes`` is specified). (For a matrix-vector product with the + vector conjugated, use ``np.vecmat(x2, x1.mT)``.) + + ..
 versionadded:: 2.2.0 + + Parameters + ---------- + x1, x2 : array_like + Input arrays, scalars not allowed. + out : ndarray, optional + A location into which the result is stored. If provided, it must have + the broadcasted shape of ``x1`` and ``x2`` with the summation axis + removed. If not provided or None, a freshly-allocated array is used. + **kwargs + For other keyword-only arguments, see the + :ref:`ufunc docs `. + + Returns + ------- + y : ndarray + The matrix-vector product of the inputs. + + Raises + ------ + ValueError + If the last dimensions of ``x1`` and ``x2`` are not the same size. + + If a scalar value is passed in. + + See Also + -------- + vecdot : Vector-vector product. + vecmat : Vector-matrix product. + matmul : Matrix-matrix product. + einsum : Einstein summation convention. + + Examples + -------- + Rotate a set of vectors from Y to X along Z. + + >>> a = np.array([[0., 1., 0.], + ... [-1., 0., 0.], + ... [0., 0., 1.]]) + >>> v = np.array([[1., 0., 0.], + ... [0., 1., 0.], + ... [0., 0., 1.], + ... [0., 6., 8.]]) + >>> np.matvec(a, v) + array([[ 0., -1., 0.], + [ 1., 0., 0.], + [ 0., 0., 1.], + [ 6., 0., 8.]]) + + """) + +add_newdoc('numpy._core.umath', 'vecmat', + """ + Vector-matrix dot product of two arrays. + + Given a vector (or stack of vectors) :math:`\\mathbf{v}` in ``x1`` and + a matrix (or stack of matrices) :math:`\\mathbf{A}` in ``x2``, the + vector-matrix product is defined as: + + .. math:: + \\mathbf{v} \\cdot \\mathbf{A} = \\sum_{i=0}^{n-1} \\overline{v_i}A_{ij} + + where the sum is over the last dimension of ``x1`` and the one-but-last + dimension in ``x2`` (unless `axes` is specified) and where + :math:`\\overline{v_i}` denotes the complex conjugate if :math:`v` + is complex and the identity otherwise. (For a non-conjugated vector-matrix + product, use ``np.matvec(x2.mT, x1)``.) + + .. versionadded:: 2.2.0 + + Parameters + ---------- + x1, x2 : array_like + Input arrays, scalars not allowed. 
+ out : ndarray, optional + A location into which the result is stored. If provided, it must have + the broadcasted shape of ``x1`` and ``x2`` with the summation axis + removed. If not provided or None, a freshly-allocated array is used. + **kwargs + For other keyword-only arguments, see the + :ref:`ufunc docs `. + + Returns + ------- + y : ndarray + The vector-matrix product of the inputs. + + Raises + ------ + ValueError + If the last dimensions of ``x1`` and the one-but-last dimension of + ``x2`` are not the same size. + + If a scalar value is passed in. + + See Also + -------- + vecdot : Vector-vector product. + matvec : Matrix-vector product. + matmul : Matrix-matrix product. + einsum : Einstein summation convention. + + Examples + -------- + Project a vector along X and Y. + + >>> v = np.array([0., 4., 2.]) + >>> a = np.array([[1., 0., 0.], + ... [0., 1., 0.], + ... [0., 0., 0.]]) + >>> np.vecmat(v, a) + array([ 0., 4., 0.]) + """) add_newdoc('numpy._core.umath', 'modf', diff --git a/numpy/_core/einsumfunc.pyi b/numpy/_core/einsumfunc.pyi index d7de9c02e16e..00629a478c25 100644 --- a/numpy/_core/einsumfunc.pyi +++ b/numpy/_core/einsumfunc.pyi @@ -180,5 +180,6 @@ def einsum_path( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeComplex_co | _DTypeLikeObject, - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = "greedy", + einsum_call: Literal[False] = False, ) -> tuple[list[Any], str]: ... 
diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 0465cc5aaa54..48648593d72f 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -1,8 +1,8 @@ +# ruff: noqa: ANN401 from collections.abc import Sequence from typing import ( Any, Literal, - NoReturn, Protocol, SupportsIndex, TypeAlias, @@ -10,6 +10,8 @@ from typing import ( overload, type_check_only, ) + +from _typeshed import Incomplete from typing_extensions import Never, deprecated import numpy as np @@ -34,6 +36,7 @@ from numpy import ( _SortSide, _CastingKind, ) +from numpy._globals import _NoValueType from numpy._typing import ( DTypeLike, _DTypeLike, @@ -44,11 +47,11 @@ from numpy._typing import ( _ShapeLike, _ArrayLikeBool_co, _ArrayLikeUInt_co, + _ArrayLikeInt, _ArrayLikeInt_co, _ArrayLikeFloat_co, _ArrayLikeComplex_co, _ArrayLikeObject_co, - _ArrayLikeTD64_co, _IntLike_co, _BoolLike_co, _ComplexLike_co, @@ -105,8 +108,7 @@ __all__ = [ _SCT = TypeVar("_SCT", bound=generic) _SCT_uifcO = TypeVar("_SCT_uifcO", bound=number[Any] | object_) -_ArrayType = TypeVar("_ArrayType", bound=np.ndarray[Any, Any]) -_SizeType = TypeVar("_SizeType", bound=int) +_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) _ShapeType = TypeVar("_ShapeType", bound=tuple[int, ...]) _ShapeType_co = TypeVar("_ShapeType_co", bound=tuple[int, ...], covariant=True) @@ -120,7 +122,7 @@ class _SupportsShape(Protocol[_ShapeType_co]): _T = TypeVar("_T") _PyArray: TypeAlias = list[_T] | tuple[_T, ...] # `int` also covers `bool` -_PyScalar: TypeAlias = int | float | complex | bytes | str +_PyScalar: TypeAlias = float | complex | bytes | str @overload def take( @@ -134,7 +136,7 @@ def take( def take( a: ArrayLike, indices: _IntLike_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., out: None = ..., mode: _ModeKind = ..., ) -> Any: ... 
@@ -142,7 +144,7 @@ def take( def take( a: _ArrayLike[_SCT], indices: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., out: None = ..., mode: _ModeKind = ..., ) -> NDArray[_SCT]: ... @@ -150,7 +152,7 @@ def take( def take( a: ArrayLike, indices: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., out: None = ..., mode: _ModeKind = ..., ) -> NDArray[Any]: ... @@ -158,10 +160,19 @@ def take( def take( a: ArrayLike, indices: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - out: _ArrayType = ..., + axis: SupportsIndex | None, + out: _ArrayT, + mode: _ModeKind = ..., +) -> _ArrayT: ... +@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = ..., + *, + out: _ArrayT, mode: _ModeKind = ..., -) -> _ArrayType: ... +) -> _ArrayT: ... @overload def reshape( # shape: index @@ -258,21 +269,21 @@ def choose( def choose( a: _ArrayLikeInt_co, choices: ArrayLike, - out: _ArrayType = ..., + out: _ArrayT, mode: _ModeKind = ..., -) -> _ArrayType: ... +) -> _ArrayT: ... @overload def repeat( a: _ArrayLike[_SCT], repeats: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., ) -> NDArray[_SCT]: ... @overload def repeat( a: ArrayLike, repeats: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., ) -> NDArray[Any]: ... def put( @@ -298,70 +309,81 @@ def swapaxes( @overload def transpose( a: _ArrayLike[_SCT], - axes: None | _ShapeLike = ... + axes: _ShapeLike | None = ... ) -> NDArray[_SCT]: ... @overload def transpose( a: ArrayLike, - axes: None | _ShapeLike = ... + axes: _ShapeLike | None = ... ) -> NDArray[Any]: ... @overload -def matrix_transpose(x: _ArrayLike[_SCT]) -> NDArray[_SCT]: ... +def matrix_transpose(x: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... @overload -def matrix_transpose(x: ArrayLike) -> NDArray[Any]: ... +def matrix_transpose(x: ArrayLike, /) -> NDArray[Any]: ... 
+# @overload def partition( a: _ArrayLike[_SCT], - kth: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - kind: _PartitionKind = ..., - order: None | str | Sequence[str] = ..., + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: None = None, ) -> NDArray[_SCT]: ... @overload +def partition( + a: _ArrayLike[np.void], + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, +) -> NDArray[np.void]: ... +@overload def partition( a: ArrayLike, - kth: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - kind: _PartitionKind = ..., - order: None | str | Sequence[str] = ..., + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, ) -> NDArray[Any]: ... +# def argpartition( a: ArrayLike, - kth: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - kind: _PartitionKind = ..., - order: None | str | Sequence[str] = ..., + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, ) -> NDArray[intp]: ... +# @overload def sort( a: _ArrayLike[_SCT], - axis: None | SupportsIndex = ..., - kind: None | _SortKind = ..., - order: None | str | Sequence[str] = ..., + axis: SupportsIndex | None = ..., + kind: _SortKind | None = ..., + order: str | Sequence[str] | None = ..., *, - stable: None | bool = ..., + stable: bool | None = ..., ) -> NDArray[_SCT]: ... @overload def sort( a: ArrayLike, - axis: None | SupportsIndex = ..., - kind: None | _SortKind = ..., - order: None | str | Sequence[str] = ..., + axis: SupportsIndex | None = ..., + kind: _SortKind | None = ..., + order: str | Sequence[str] | None = ..., *, - stable: None | bool = ..., + stable: bool | None = ..., ) -> NDArray[Any]: ... 
def argsort( a: ArrayLike, - axis: None | SupportsIndex = ..., - kind: None | _SortKind = ..., - order: None | str | Sequence[str] = ..., + axis: SupportsIndex | None = ..., + kind: _SortKind | None = ..., + order: str | Sequence[str] | None = ..., *, - stable: None | bool = ..., + stable: bool | None = ..., ) -> NDArray[intp]: ... @overload @@ -375,7 +397,7 @@ def argmax( @overload def argmax( a: ArrayLike, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., out: None = ..., *, keepdims: bool = ..., @@ -383,11 +405,19 @@ def argmax( @overload def argmax( a: ArrayLike, - axis: None | SupportsIndex = ..., - out: _ArrayType = ..., + axis: SupportsIndex | None, + out: _ArrayT, + *, + keepdims: bool = ..., +) -> _ArrayT: ... +@overload +def argmax( + a: ArrayLike, + axis: SupportsIndex | None = ..., *, + out: _ArrayT, keepdims: bool = ..., -) -> _ArrayType: ... +) -> _ArrayT: ... @overload def argmin( @@ -400,7 +430,7 @@ def argmin( @overload def argmin( a: ArrayLike, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., out: None = ..., *, keepdims: bool = ..., @@ -408,59 +438,63 @@ def argmin( @overload def argmin( a: ArrayLike, - axis: None | SupportsIndex = ..., - out: _ArrayType = ..., + axis: SupportsIndex | None, + out: _ArrayT, *, keepdims: bool = ..., -) -> _ArrayType: ... +) -> _ArrayT: ... +@overload +def argmin( + a: ArrayLike, + axis: SupportsIndex | None = ..., + *, + out: _ArrayT, + keepdims: bool = ..., +) -> _ArrayT: ... @overload def searchsorted( a: ArrayLike, v: _ScalarLike_co, side: _SortSide = ..., - sorter: None | _ArrayLikeInt_co = ..., # 1D int array + sorter: _ArrayLikeInt_co | None = ..., # 1D int array ) -> intp: ... @overload def searchsorted( a: ArrayLike, v: ArrayLike, side: _SortSide = ..., - sorter: None | _ArrayLikeInt_co = ..., # 1D int array + sorter: _ArrayLikeInt_co | None = ..., # 1D int array ) -> NDArray[intp]: ... 
-# unlike `reshape`, `resize` only accepts positive integers, so literal ints can be used -@overload -def resize(a: _ArrayLike[_SCT], new_shape: _SizeType) -> np.ndarray[tuple[_SizeType], np.dtype[_SCT]]: ... -@overload -def resize(a: _ArrayLike[_SCT], new_shape: SupportsIndex) -> np.ndarray[tuple[int], np.dtype[_SCT]]: ... +# @overload -def resize(a: _ArrayLike[_SCT], new_shape: _ShapeType) -> np.ndarray[_ShapeType, np.dtype[_SCT]]: ... +def resize(a: _ArrayLike[_SCT], new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype[_SCT]]: ... @overload -def resize(a: _ArrayLike[_SCT], new_shape: Sequence[SupportsIndex]) -> NDArray[_SCT]: ... +def resize(a: _ArrayLike[_SCT], new_shape: _AnyShapeType) -> np.ndarray[_AnyShapeType, np.dtype[_SCT]]: ... @overload -def resize(a: ArrayLike, new_shape: _SizeType) -> np.ndarray[tuple[_SizeType], np.dtype[Any]]: ... +def resize(a: _ArrayLike[_SCT], new_shape: _ShapeLike) -> NDArray[_SCT]: ... @overload -def resize(a: ArrayLike, new_shape: SupportsIndex) -> np.ndarray[tuple[int], np.dtype[Any]]: ... +def resize(a: ArrayLike, new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype[Any]]: ... @overload -def resize(a: ArrayLike, new_shape: _ShapeType) -> np.ndarray[_ShapeType, np.dtype[Any]]: ... +def resize(a: ArrayLike, new_shape: _AnyShapeType) -> np.ndarray[_AnyShapeType, np.dtype[Any]]: ... @overload -def resize(a: ArrayLike, new_shape: Sequence[SupportsIndex]) -> NDArray[Any]: ... +def resize(a: ArrayLike, new_shape: _ShapeLike) -> NDArray[Any]: ... @overload def squeeze( a: _SCT, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None = ..., ) -> _SCT: ... @overload def squeeze( a: _ArrayLike[_SCT], - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None = ..., ) -> NDArray[_SCT]: ... @overload def squeeze( a: ArrayLike, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None = ..., ) -> NDArray[Any]: ... 
@overload @@ -488,14 +522,24 @@ def trace( out: None = ..., ) -> Any: ... @overload +def trace( + a: ArrayLike, # >= 2D array + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike, + out: _ArrayT, +) -> _ArrayT: ... +@overload def trace( a: ArrayLike, # >= 2D array offset: SupportsIndex = ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., dtype: DTypeLike = ..., - out: _ArrayType = ..., -) -> _ArrayType: ... + *, + out: _ArrayT, +) -> _ArrayT: ... _Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_SCT]] @@ -519,9 +563,6 @@ def ravel( @overload def ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[tuple[int], np.dtype[Any]]: ... -@overload -def nonzero(a: np.generic | np.ndarray[tuple[()], Any]) -> NoReturn: ... -@overload def nonzero(a: _ArrayLike[Any]) -> tuple[NDArray[intp], ...]: ... # this prevents `Any` from being returned with Pyright @@ -547,120 +588,128 @@ def shape(a: ArrayLike) -> tuple[int, ...]: ... def compress( condition: _ArrayLikeBool_co, # 1D bool array a: _ArrayLike[_SCT], - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., out: None = ..., ) -> NDArray[_SCT]: ... @overload def compress( condition: _ArrayLikeBool_co, # 1D bool array a: ArrayLike, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., out: None = ..., ) -> NDArray[Any]: ... @overload def compress( condition: _ArrayLikeBool_co, # 1D bool array a: ArrayLike, - axis: None | SupportsIndex = ..., - out: _ArrayType = ..., -) -> _ArrayType: ... + axis: SupportsIndex | None, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def compress( + condition: _ArrayLikeBool_co, # 1D bool array + a: ArrayLike, + axis: SupportsIndex | None = ..., + *, + out: _ArrayT, +) -> _ArrayT: ... 
@overload def clip( a: _SCT, - a_min: None | ArrayLike, - a_max: None | ArrayLike, + a_min: ArrayLike | None, + a_max: ArrayLike | None, out: None = ..., *, - min: None | ArrayLike = ..., - max: None | ArrayLike = ..., + min: ArrayLike | None = ..., + max: ArrayLike | None = ..., dtype: None = ..., - where: None | _ArrayLikeBool_co = ..., + where: _ArrayLikeBool_co | None = ..., order: _OrderKACF = ..., subok: bool = ..., - signature: str | tuple[None | str, ...] = ..., + signature: str | tuple[str | None, ...] = ..., casting: _CastingKind = ..., ) -> _SCT: ... @overload def clip( a: _ScalarLike_co, - a_min: None | ArrayLike, - a_max: None | ArrayLike, + a_min: ArrayLike | None, + a_max: ArrayLike | None, out: None = ..., *, - min: None | ArrayLike = ..., - max: None | ArrayLike = ..., + min: ArrayLike | None = ..., + max: ArrayLike | None = ..., dtype: None = ..., - where: None | _ArrayLikeBool_co = ..., + where: _ArrayLikeBool_co | None = ..., order: _OrderKACF = ..., subok: bool = ..., - signature: str | tuple[None | str, ...] = ..., + signature: str | tuple[str | None, ...] = ..., casting: _CastingKind = ..., ) -> Any: ... @overload def clip( a: _ArrayLike[_SCT], - a_min: None | ArrayLike, - a_max: None | ArrayLike, + a_min: ArrayLike | None, + a_max: ArrayLike | None, out: None = ..., *, - min: None | ArrayLike = ..., - max: None | ArrayLike = ..., + min: ArrayLike | None = ..., + max: ArrayLike | None = ..., dtype: None = ..., - where: None | _ArrayLikeBool_co = ..., + where: _ArrayLikeBool_co | None = ..., order: _OrderKACF = ..., subok: bool = ..., - signature: str | tuple[None | str, ...] = ..., + signature: str | tuple[str | None, ...] = ..., casting: _CastingKind = ..., ) -> NDArray[_SCT]: ... 
@overload def clip( a: ArrayLike, - a_min: None | ArrayLike, - a_max: None | ArrayLike, + a_min: ArrayLike | None, + a_max: ArrayLike | None, out: None = ..., *, - min: None | ArrayLike = ..., - max: None | ArrayLike = ..., + min: ArrayLike | None = ..., + max: ArrayLike | None = ..., dtype: None = ..., - where: None | _ArrayLikeBool_co = ..., + where: _ArrayLikeBool_co | None = ..., order: _OrderKACF = ..., subok: bool = ..., - signature: str | tuple[None | str, ...] = ..., + signature: str | tuple[str | None, ...] = ..., casting: _CastingKind = ..., ) -> NDArray[Any]: ... @overload def clip( a: ArrayLike, - a_min: None | ArrayLike, - a_max: None | ArrayLike, - out: _ArrayType = ..., + a_min: ArrayLike | None, + a_max: ArrayLike | None, + out: _ArrayT, *, - min: None | ArrayLike = ..., - max: None | ArrayLike = ..., - dtype: DTypeLike, - where: None | _ArrayLikeBool_co = ..., + min: ArrayLike | None = ..., + max: ArrayLike | None = ..., + dtype: DTypeLike = ..., + where: _ArrayLikeBool_co | None = ..., order: _OrderKACF = ..., subok: bool = ..., - signature: str | tuple[None | str, ...] = ..., + signature: str | tuple[str | None, ...] = ..., casting: _CastingKind = ..., -) -> Any: ... +) -> _ArrayT: ... @overload def clip( a: ArrayLike, - a_min: None | ArrayLike, - a_max: None | ArrayLike, - out: _ArrayType, + a_min: ArrayLike | None, + a_max: ArrayLike | None, + out: ArrayLike = ..., *, - min: None | ArrayLike = ..., - max: None | ArrayLike = ..., - dtype: DTypeLike = ..., - where: None | _ArrayLikeBool_co = ..., + min: ArrayLike | None = ..., + max: ArrayLike | None = ..., + dtype: DTypeLike, + where: _ArrayLikeBool_co | None = ..., order: _OrderKACF = ..., subok: bool = ..., - signature: str | tuple[None | str, ...] = ..., + signature: str | tuple[str | None, ...] = ..., casting: _CastingKind = ..., -) -> _ArrayType: ... +) -> Any: ... 
@overload def sum( @@ -706,7 +755,7 @@ def sum( @overload def sum( a: ArrayLike, - axis: None | _ShapeLike, + axis: _ShapeLike | None, dtype: _DTypeLike[_SCT], out: None = ..., keepdims: bool = ..., @@ -716,7 +765,7 @@ def sum( @overload def sum( a: ArrayLike, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None = ..., *, dtype: _DTypeLike[_SCT], out: None = ..., @@ -727,7 +776,7 @@ def sum( @overload def sum( a: ArrayLike, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., out: None = ..., keepdims: bool = ..., @@ -737,130 +786,157 @@ def sum( @overload def sum( a: ArrayLike, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None, + dtype: DTypeLike, + out: _ArrayT, + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayT: ... +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., - out: _ArrayType = ..., + *, + out: _ArrayT, keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., -) -> _ArrayType: ... +) -> _ArrayT: ... @overload def all( a: ArrayLike, axis: None = None, out: None = None, - keepdims: Literal[False, 0] = False, + keepdims: Literal[False, 0] | _NoValueType = ..., *, - where: _ArrayLikeBool_co = True, + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> np.bool: ... @overload def all( a: ArrayLike, - axis: None | int | tuple[int, ...] = None, + axis: int | tuple[int, ...] | None = None, out: None = None, - keepdims: SupportsIndex = False, + keepdims: _BoolLike_co | _NoValueType = ..., *, - where: _ArrayLikeBool_co = True, -) -> np.bool | NDArray[np.bool]: ... + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Incomplete: ... @overload def all( a: ArrayLike, - axis: None | int | tuple[int, ...], - out: _ArrayType, - keepdims: SupportsIndex = False, + axis: int | tuple[int, ...] 
| None, + out: _ArrayT, + keepdims: _BoolLike_co | _NoValueType = ..., *, - where: _ArrayLikeBool_co = True, -) -> _ArrayType: ... + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... @overload def all( a: ArrayLike, - axis: None | int | tuple[int, ...] = None, + axis: int | tuple[int, ...] | None = None, *, - out: _ArrayType, - keepdims: SupportsIndex = False, - where: _ArrayLikeBool_co = True, -) -> _ArrayType: ... + out: _ArrayT, + keepdims: _BoolLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... @overload def any( a: ArrayLike, axis: None = None, out: None = None, - keepdims: Literal[False, 0] = False, + keepdims: Literal[False, 0] | _NoValueType = ..., *, - where: _ArrayLikeBool_co = True, + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> np.bool: ... @overload def any( a: ArrayLike, - axis: None | int | tuple[int, ...] = None, + axis: int | tuple[int, ...] | None = None, out: None = None, - keepdims: SupportsIndex = False, + keepdims: _BoolLike_co | _NoValueType = ..., *, - where: _ArrayLikeBool_co = True, -) -> np.bool | NDArray[np.bool]: ... + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Incomplete: ... @overload def any( a: ArrayLike, - axis: None | int | tuple[int, ...], - out: _ArrayType, - keepdims: SupportsIndex = False, + axis: int | tuple[int, ...] | None, + out: _ArrayT, + keepdims: _BoolLike_co | _NoValueType = ..., *, - where: _ArrayLikeBool_co = True, -) -> _ArrayType: ... + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... @overload def any( a: ArrayLike, - axis: None | int | tuple[int, ...] = None, + axis: int | tuple[int, ...] | None = None, *, - out: _ArrayType, - keepdims: SupportsIndex = False, - where: _ArrayLikeBool_co = True, -) -> _ArrayType: ... + out: _ArrayT, + keepdims: _BoolLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... 
@overload def cumsum( a: _ArrayLike[_SCT], - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., ) -> NDArray[_SCT]: ... @overload def cumsum( a: ArrayLike, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., ) -> NDArray[Any]: ... @overload def cumsum( a: ArrayLike, - axis: None | SupportsIndex = ..., - dtype: _DTypeLike[_SCT] = ..., + axis: SupportsIndex | None, + dtype: _DTypeLike[_SCT], out: None = ..., ) -> NDArray[_SCT]: ... @overload def cumsum( a: ArrayLike, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., + *, + dtype: _DTypeLike[_SCT], + out: None = ..., +) -> NDArray[_SCT]: ... +@overload +def cumsum( + a: ArrayLike, + axis: SupportsIndex | None = ..., dtype: DTypeLike = ..., out: None = ..., ) -> NDArray[Any]: ... @overload def cumsum( a: ArrayLike, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None, + dtype: DTypeLike, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def cumsum( + a: ArrayLike, + axis: SupportsIndex | None = ..., dtype: DTypeLike = ..., - out: _ArrayType = ..., -) -> _ArrayType: ... + *, + out: _ArrayT, +) -> _ArrayT: ... @overload def cumulative_sum( x: _ArrayLike[_SCT], /, *, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., include_initial: bool = ..., @@ -870,7 +946,7 @@ def cumulative_sum( x: ArrayLike, /, *, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., include_initial: bool = ..., @@ -880,8 +956,8 @@ def cumulative_sum( x: ArrayLike, /, *, - axis: None | SupportsIndex = ..., - dtype: _DTypeLike[_SCT] = ..., + axis: SupportsIndex | None = ..., + dtype: _DTypeLike[_SCT], out: None = ..., include_initial: bool = ..., ) -> NDArray[_SCT]: ... 
@@ -890,7 +966,7 @@ def cumulative_sum( x: ArrayLike, /, *, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: DTypeLike = ..., out: None = ..., include_initial: bool = ..., @@ -900,11 +976,11 @@ def cumulative_sum( x: ArrayLike, /, *, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: DTypeLike = ..., - out: _ArrayType = ..., + out: _ArrayT, include_initial: bool = ..., -) -> _ArrayType: ... +) -> _ArrayT: ... @overload def ptp( @@ -916,17 +992,25 @@ def ptp( @overload def ptp( a: ArrayLike, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None = ..., out: None = ..., keepdims: bool = ..., ) -> Any: ... @overload def ptp( a: ArrayLike, - axis: None | _ShapeLike = ..., - out: _ArrayType = ..., + axis: _ShapeLike | None, + out: _ArrayT, keepdims: bool = ..., -) -> _ArrayType: ... +) -> _ArrayT: ... +@overload +def ptp( + a: ArrayLike, + axis: _ShapeLike | None = ..., + *, + out: _ArrayT, + keepdims: bool = ..., +) -> _ArrayT: ... @overload def amax( @@ -940,7 +1024,7 @@ def amax( @overload def amax( a: ArrayLike, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None = ..., out: None = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., @@ -949,12 +1033,22 @@ def amax( @overload def amax( a: ArrayLike, - axis: None | _ShapeLike = ..., - out: _ArrayType = ..., + axis: _ShapeLike | None, + out: _ArrayT, keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., -) -> _ArrayType: ... +) -> _ArrayT: ... +@overload +def amax( + a: ArrayLike, + axis: _ShapeLike | None = ..., + *, + out: _ArrayT, + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayT: ... 
@overload def amin( @@ -968,7 +1062,7 @@ def amin( @overload def amin( a: ArrayLike, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None = ..., out: None = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., @@ -977,12 +1071,22 @@ def amin( @overload def amin( a: ArrayLike, - axis: None | _ShapeLike = ..., - out: _ArrayType = ..., + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayT: ... +@overload +def amin( + a: ArrayLike, + axis: _ShapeLike | None = ..., + *, + out: _ArrayT, keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., -) -> _ArrayType: ... +) -> _ArrayT: ... # TODO: `np.prod()``: For object arrays `initial` does not necessarily # have to be a numerical scalar. @@ -1044,7 +1148,7 @@ def prod( @overload def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None = ..., dtype: None = ..., out: None = ..., keepdims: bool = ..., @@ -1052,10 +1156,21 @@ def prod( where: _ArrayLikeBool_co = ..., ) -> Any: ... @overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None, + dtype: _DTypeLike[_SCT], + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _SCT: ... 
+@overload def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None = ..., - dtype: _DTypeLike[_SCT] = ..., + *, + dtype: _DTypeLike[_SCT], out: None = ..., keepdims: Literal[False] = ..., initial: _NumberLike_co = ..., @@ -1064,8 +1179,8 @@ def prod( @overload def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - dtype: None | DTypeLike = ..., + axis: _ShapeLike | None = ..., + dtype: DTypeLike | None = ..., out: None = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., @@ -1074,84 +1189,111 @@ def prod( @overload def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - dtype: None | DTypeLike = ..., - out: _ArrayType = ..., + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., -) -> _ArrayType: ... +) -> _ArrayT: ... +@overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: DTypeLike | None = ..., + *, + out: _ArrayT, + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayT: ... @overload def cumprod( a: _ArrayLikeBool_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., ) -> NDArray[int_]: ... @overload def cumprod( a: _ArrayLikeUInt_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., ) -> NDArray[uint64]: ... @overload def cumprod( a: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., ) -> NDArray[int64]: ... @overload def cumprod( a: _ArrayLikeFloat_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., ) -> NDArray[floating[Any]]: ... 
@overload def cumprod( a: _ArrayLikeComplex_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., ) -> NDArray[complexfloating[Any, Any]]: ... @overload def cumprod( a: _ArrayLikeObject_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., ) -> NDArray[object_]: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | SupportsIndex = ..., - dtype: _DTypeLike[_SCT] = ..., + axis: SupportsIndex | None, + dtype: _DTypeLike[_SCT], + out: None = ..., +) -> NDArray[_SCT]: ... +@overload +def cumprod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: SupportsIndex | None = ..., + *, + dtype: _DTypeLike[_SCT], out: None = ..., ) -> NDArray[_SCT]: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: DTypeLike = ..., out: None = ..., ) -> NDArray[Any]: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None, + dtype: DTypeLike, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def cumprod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: SupportsIndex | None = ..., dtype: DTypeLike = ..., - out: _ArrayType = ..., -) -> _ArrayType: ... + *, + out: _ArrayT, +) -> _ArrayT: ... 
@overload def cumulative_prod( x: _ArrayLikeBool_co, /, *, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., include_initial: bool = ..., @@ -1161,7 +1303,7 @@ def cumulative_prod( x: _ArrayLikeUInt_co, /, *, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., include_initial: bool = ..., @@ -1171,7 +1313,7 @@ def cumulative_prod( x: _ArrayLikeInt_co, /, *, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., include_initial: bool = ..., @@ -1181,7 +1323,7 @@ def cumulative_prod( x: _ArrayLikeFloat_co, /, *, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., include_initial: bool = ..., @@ -1191,7 +1333,7 @@ def cumulative_prod( x: _ArrayLikeComplex_co, /, *, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., include_initial: bool = ..., @@ -1201,7 +1343,7 @@ def cumulative_prod( x: _ArrayLikeObject_co, /, *, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., include_initial: bool = ..., @@ -1211,8 +1353,8 @@ def cumulative_prod( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: None | SupportsIndex = ..., - dtype: _DTypeLike[_SCT] = ..., + axis: SupportsIndex | None = ..., + dtype: _DTypeLike[_SCT], out: None = ..., include_initial: bool = ..., ) -> NDArray[_SCT]: ... 
@@ -1221,7 +1363,7 @@ def cumulative_prod( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: DTypeLike = ..., out: None = ..., include_initial: bool = ..., @@ -1231,15 +1373,15 @@ def cumulative_prod( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: DTypeLike = ..., - out: _ArrayType = ..., + out: _ArrayT, include_initial: bool = ..., -) -> _ArrayType: ... +) -> _ArrayT: ... def ndim(a: ArrayLike) -> int: ... -def size(a: ArrayLike, axis: None | int = ...) -> int: ... +def size(a: ArrayLike, axis: int | None = ...) -> int: ... @overload def around( @@ -1278,11 +1420,18 @@ def around( out: None = ..., ) -> NDArray[Any]: ... @overload +def around( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + decimals: SupportsIndex, + out: _ArrayT, +) -> _ArrayT: ... +@overload def around( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, decimals: SupportsIndex = ..., - out: _ArrayType = ..., -) -> _ArrayType: ... + *, + out: _ArrayT, +) -> _ArrayT: ... @overload def mean( @@ -1290,9 +1439,9 @@ def mean( axis: None = ..., dtype: None = ..., out: None = ..., - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., *, - where: _ArrayLikeBool_co = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> floating[Any]: ... @overload def mean( @@ -1300,50 +1449,50 @@ def mean( axis: None = ..., dtype: None = ..., out: None = ..., - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., *, - where: _ArrayLikeBool_co = ..., -) -> complexfloating[Any, Any]: ... + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> complexfloating[Any]: ... 
@overload def mean( - a: _ArrayLikeTD64_co, + a: _ArrayLike[np.timedelta64], axis: None = ..., dtype: None = ..., out: None = ..., - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., *, - where: _ArrayLikeBool_co = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> timedelta64: ... @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - dtype: None = ..., - out: None = ..., - keepdims: bool = ..., + axis: _ShapeLike | None, + dtype: DTypeLike, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., *, - where: _ArrayLikeBool_co = ..., -) -> Any: ... + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., - dtype: _DTypeLike[_SCT] = ..., - out: None = ..., - keepdims: Literal[False] = ..., + axis: _ShapeLike | None = ..., + dtype: DTypeLike | None = ..., *, - where: _ArrayLikeBool_co = ..., -) -> _SCT: ... + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None, dtype: _DTypeLike[_SCT], out: None = ..., - keepdims: bool = ..., + keepdims: Literal[False] | _NoValueType = ..., *, - where: _ArrayLikeBool_co = ..., -) -> _SCT | NDArray[_SCT]: ... + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _SCT: ... @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, @@ -1351,29 +1500,49 @@ def mean( *, dtype: _DTypeLike[_SCT], out: None = ..., - keepdims: bool = ..., - where: _ArrayLikeBool_co = ..., -) -> _SCT | NDArray[_SCT]: ... + keepdims: Literal[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _SCT: ... 
@overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., + axis: _ShapeLike | None, + dtype: _DTypeLike[_SCT], + out: None, + keepdims: Literal[True, 1], + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[_SCT]: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None, + dtype: _DTypeLike[_SCT], out: None = ..., - keepdims: bool = ..., *, - where: _ArrayLikeBool_co = ..., -) -> Any: ... + keepdims: bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _SCT | NDArray[_SCT]: ... @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: _ArrayType = ..., - keepdims: bool = ..., + axis: _ShapeLike | None = ..., *, - where: _ArrayLikeBool_co = ..., -) -> _ArrayType: ... + dtype: _DTypeLike[_SCT], + out: None = ..., + keepdims: bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _SCT | NDArray[_SCT]: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: DTypeLike | None = ..., + out: None = ..., + keepdims: bool | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Incomplete: ... @overload def std( @@ -1381,65 +1550,91 @@ def std( axis: None = ..., dtype: None = ..., out: None = ..., - ddof: int | float = ..., + ddof: float = ..., keepdims: Literal[False] = ..., *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co = ..., - correction: int | float = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> floating[Any]: ... 
@overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None = ..., dtype: None = ..., out: None = ..., - ddof: int | float = ..., + ddof: float = ..., keepdims: bool = ..., *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., - correction: int | float = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> Any: ... @overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., - dtype: _DTypeLike[_SCT] = ..., + axis: None, + dtype: _DTypeLike[_SCT], out: None = ..., - ddof: int | float = ..., + ddof: float = ..., keepdims: Literal[False] = ..., *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., - correction: int | float = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> _SCT: ... @overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., + axis: None = ..., + *, + dtype: _DTypeLike[_SCT], + out: None = ..., + ddof: float = ..., + keepdims: Literal[False] = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _SCT: ... 
+@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., out: None = ..., - ddof: int | float = ..., + ddof: float = ..., keepdims: bool = ..., *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., - correction: int | float = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> Any: ... @overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: _ArrayType = ..., - ddof: int | float = ..., + axis: _ShapeLike | None, + dtype: DTypeLike, + out: _ArrayT, + ddof: float = ..., keepdims: bool = ..., *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., - correction: int | float = ..., -) -> _ArrayType: ... + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayT, + ddof: float = ..., + keepdims: bool = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ArrayT: ... @overload def var( @@ -1447,65 +1642,91 @@ def var( axis: None = ..., dtype: None = ..., out: None = ..., - ddof: int | float = ..., + ddof: float = ..., keepdims: Literal[False] = ..., *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co = ..., - correction: int | float = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> floating[Any]: ... 
@overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None = ..., dtype: None = ..., out: None = ..., - ddof: int | float = ..., + ddof: float = ..., keepdims: bool = ..., *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., - correction: int | float = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> Any: ... @overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., - dtype: _DTypeLike[_SCT] = ..., + axis: None, + dtype: _DTypeLike[_SCT], out: None = ..., - ddof: int | float = ..., + ddof: float = ..., keepdims: Literal[False] = ..., *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., - correction: int | float = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> _SCT: ... @overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., + axis: None = ..., + *, + dtype: _DTypeLike[_SCT], + out: None = ..., + ddof: float = ..., + keepdims: Literal[False] = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _SCT: ... 
+@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., out: None = ..., - ddof: int | float = ..., + ddof: float = ..., keepdims: bool = ..., *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., - correction: int | float = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> Any: ... @overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: _ArrayType = ..., - ddof: int | float = ..., + axis: _ShapeLike | None, + dtype: DTypeLike, + out: _ArrayT, + ddof: float = ..., keepdims: bool = ..., *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., - correction: int | float = ..., -) -> _ArrayType: ... + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayT, + ddof: float = ..., + keepdims: bool = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ArrayT: ... max = amax min = amin diff --git a/numpy/_core/function_base.pyi b/numpy/_core/function_base.pyi index 1d7ea3a2792e..12fdf677d0f5 100644 --- a/numpy/_core/function_base.pyi +++ b/numpy/_core/function_base.pyi @@ -29,8 +29,8 @@ def linspace( dtype: None = ..., axis: SupportsIndex = ..., *, - device: None | L["cpu"] = ..., -) -> NDArray[floating[Any]]: ... + device: L["cpu"] | None = ..., +) -> NDArray[floating]: ... 
@overload def linspace( start: _ArrayLikeComplex_co, @@ -41,8 +41,20 @@ def linspace( dtype: None = ..., axis: SupportsIndex = ..., *, - device: None | L["cpu"] = ..., -) -> NDArray[complexfloating[Any, Any]]: ... + device: L["cpu"] | None = ..., +) -> NDArray[complexfloating]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex, + endpoint: bool, + retstep: L[False], + dtype: _DTypeLike[_SCT], + axis: SupportsIndex = ..., + *, + device: L["cpu"] | None = ..., +) -> NDArray[_SCT]: ... @overload def linspace( start: _ArrayLikeComplex_co, @@ -50,10 +62,10 @@ def linspace( num: SupportsIndex = ..., endpoint: bool = ..., retstep: L[False] = ..., - dtype: _DTypeLike[_SCT] = ..., - axis: SupportsIndex = ..., *, - device: None | L["cpu"] = ..., + dtype: _DTypeLike[_SCT], + axis: SupportsIndex = ..., + device: L["cpu"] | None = ..., ) -> NDArray[_SCT]: ... @overload def linspace( @@ -65,7 +77,7 @@ def linspace( dtype: DTypeLike = ..., axis: SupportsIndex = ..., *, - device: None | L["cpu"] = ..., + device: L["cpu"] | None = ..., ) -> NDArray[Any]: ... @overload def linspace( @@ -73,35 +85,35 @@ def linspace( stop: _ArrayLikeFloat_co, num: SupportsIndex = ..., endpoint: bool = ..., - retstep: L[True] = ..., + *, + retstep: L[True], dtype: None = ..., axis: SupportsIndex = ..., - *, - device: None | L["cpu"] = ..., -) -> tuple[NDArray[floating[Any]], floating[Any]]: ... + device: L["cpu"] | None = ..., +) -> tuple[NDArray[floating], floating]: ... @overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = ..., endpoint: bool = ..., - retstep: L[True] = ..., + *, + retstep: L[True], dtype: None = ..., axis: SupportsIndex = ..., - *, - device: None | L["cpu"] = ..., -) -> tuple[NDArray[complexfloating[Any, Any]], complexfloating[Any, Any]]: ... + device: L["cpu"] | None = ..., +) -> tuple[NDArray[complexfloating], complexfloating]: ... 
@overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = ..., endpoint: bool = ..., - retstep: L[True] = ..., - dtype: _DTypeLike[_SCT] = ..., - axis: SupportsIndex = ..., *, - device: None | L["cpu"] = ..., + retstep: L[True], + dtype: _DTypeLike[_SCT], + axis: SupportsIndex = ..., + device: L["cpu"] | None = ..., ) -> tuple[NDArray[_SCT], _SCT]: ... @overload def linspace( @@ -109,11 +121,11 @@ def linspace( stop: _ArrayLikeComplex_co, num: SupportsIndex = ..., endpoint: bool = ..., - retstep: L[True] = ..., + *, + retstep: L[True], dtype: DTypeLike = ..., axis: SupportsIndex = ..., - *, - device: None | L["cpu"] = ..., + device: L["cpu"] | None = ..., ) -> tuple[NDArray[Any], Any]: ... @overload @@ -125,7 +137,7 @@ def logspace( base: _ArrayLikeFloat_co = ..., dtype: None = ..., axis: SupportsIndex = ..., -) -> NDArray[floating[Any]]: ... +) -> NDArray[floating]: ... @overload def logspace( start: _ArrayLikeComplex_co, @@ -135,7 +147,17 @@ def logspace( base: _ArrayLikeComplex_co = ..., dtype: None = ..., axis: SupportsIndex = ..., -) -> NDArray[complexfloating[Any, Any]]: ... +) -> NDArray[complexfloating]: ... +@overload +def logspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex, + endpoint: bool, + base: _ArrayLikeComplex_co, + dtype: _DTypeLike[_SCT], + axis: SupportsIndex = ..., +) -> NDArray[_SCT]: ... @overload def logspace( start: _ArrayLikeComplex_co, @@ -143,7 +165,8 @@ def logspace( num: SupportsIndex = ..., endpoint: bool = ..., base: _ArrayLikeComplex_co = ..., - dtype: _DTypeLike[_SCT] = ..., + *, + dtype: _DTypeLike[_SCT], axis: SupportsIndex = ..., ) -> NDArray[_SCT]: ... @overload @@ -165,7 +188,7 @@ def geomspace( endpoint: bool = ..., dtype: None = ..., axis: SupportsIndex = ..., -) -> NDArray[floating[Any]]: ... +) -> NDArray[floating]: ... 
@overload def geomspace( start: _ArrayLikeComplex_co, @@ -174,14 +197,24 @@ def geomspace( endpoint: bool = ..., dtype: None = ..., axis: SupportsIndex = ..., -) -> NDArray[complexfloating[Any, Any]]: ... +) -> NDArray[complexfloating]: ... +@overload +def geomspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex, + endpoint: bool, + dtype: _DTypeLike[_SCT], + axis: SupportsIndex = ..., +) -> NDArray[_SCT]: ... @overload def geomspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = ..., endpoint: bool = ..., - dtype: _DTypeLike[_SCT] = ..., + *, + dtype: _DTypeLike[_SCT], axis: SupportsIndex = ..., ) -> NDArray[_SCT]: ... @overload diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h index ecbe3b49b229..37788a74557f 100644 --- a/numpy/_core/include/numpy/ndarraytypes.h +++ b/numpy/_core/include/numpy/ndarraytypes.h @@ -6,6 +6,10 @@ #include "npy_cpu.h" #include "utils.h" +#ifdef __cplusplus +extern "C" { +#endif + #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN /* Always allow threading unless it was explicitly disabled at build time */ @@ -1922,4 +1926,8 @@ typedef struct { */ #undef NPY_DEPRECATED_INCLUDES +#ifdef __cplusplus +} +#endif + #endif /* NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ */ diff --git a/numpy/_core/include/numpy/npy_math.h b/numpy/_core/include/numpy/npy_math.h index d11df12b7ceb..abc784bc686c 100644 --- a/numpy/_core/include/numpy/npy_math.h +++ b/numpy/_core/include/numpy/npy_math.h @@ -363,7 +363,7 @@ NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0); static inline double npy_creal(const npy_cdouble z) { #if defined(__cplusplus) - return ((double *) &z)[0]; + return z._Val[0]; #else return creal(z); #endif @@ -377,7 +377,7 @@ static inline void npy_csetreal(npy_cdouble *z, const double r) static inline double npy_cimag(const npy_cdouble z) { #if defined(__cplusplus) - return ((double *) &z)[1]; + return 
z._Val[1]; #else return cimag(z); #endif @@ -391,7 +391,7 @@ static inline void npy_csetimag(npy_cdouble *z, const double i) static inline float npy_crealf(const npy_cfloat z) { #if defined(__cplusplus) - return ((float *) &z)[0]; + return z._Val[0]; #else return crealf(z); #endif @@ -405,7 +405,7 @@ static inline void npy_csetrealf(npy_cfloat *z, const float r) static inline float npy_cimagf(const npy_cfloat z) { #if defined(__cplusplus) - return ((float *) &z)[1]; + return z._Val[1]; #else return cimagf(z); #endif @@ -419,7 +419,7 @@ static inline void npy_csetimagf(npy_cfloat *z, const float i) static inline npy_longdouble npy_creall(const npy_clongdouble z) { #if defined(__cplusplus) - return ((longdouble_t *) &z)[0]; + return (npy_longdouble)z._Val[0]; #else return creall(z); #endif @@ -433,7 +433,7 @@ static inline void npy_csetreall(npy_clongdouble *z, const longdouble_t r) static inline npy_longdouble npy_cimagl(const npy_clongdouble z) { #if defined(__cplusplus) - return ((longdouble_t *) &z)[1]; + return (npy_longdouble)z._Val[1]; #else return cimagl(z); #endif diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 979ceb2cfcfe..6b6fbd3490ae 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -713,7 +713,7 @@ py.extension_module('_multiarray_tests', src_file.process('src/multiarray/_multiarray_tests.c.src'), 'src/common/mem_overlap.c', 'src/common/npy_argparse.c', - 'src/common/npy_hashtable.c', + 'src/common/npy_hashtable.cpp', src_file.process('src/common/templ_common.h.src') ], c_args: c_args_common, @@ -1042,7 +1042,7 @@ src_multiarray_umath_common = [ 'src/common/gil_utils.c', 'src/common/mem_overlap.c', 'src/common/npy_argparse.c', - 'src/common/npy_hashtable.c', + 'src/common/npy_hashtable.cpp', 'src/common/npy_import.c', 'src/common/npy_longdouble.c', 'src/common/ucsnarrow.c', @@ -1153,7 +1153,7 @@ src_umath = umath_gen_headers + [ 'src/umath/ufunc_type_resolution.c', 'src/umath/clip.cpp', 'src/umath/clip.h', - 
'src/umath/dispatching.c', + 'src/umath/dispatching.cpp', 'src/umath/extobj.c', 'src/umath/legacy_array_method.c', 'src/umath/override.c', @@ -1290,17 +1290,26 @@ python_sources = [ '__init__.py', '__init__.pyi', '_add_newdocs.py', + '_add_newdocs.pyi', '_add_newdocs_scalars.py', + '_add_newdocs_scalars.pyi', '_asarray.py', '_asarray.pyi', '_dtype.py', + '_dtype.pyi', '_dtype_ctypes.py', + '_dtype_ctypes.pyi', '_exceptions.py', + '_exceptions.pyi', '_internal.py', '_internal.pyi', '_machar.py', + '_machar.pyi', '_methods.py', + '_methods.pyi', + '_simd.pyi', '_string_helpers.py', + '_string_helpers.pyi', '_type_aliases.py', '_type_aliases.pyi', '_ufunc_config.py', @@ -1327,7 +1336,9 @@ python_sources = [ 'numerictypes.py', 'numerictypes.pyi', 'overrides.py', + 'overrides.pyi', 'printoptions.py', + 'printoptions.pyi', 'records.py', 'records.pyi', 'shape_base.py', @@ -1335,6 +1346,7 @@ python_sources = [ 'strings.py', 'strings.pyi', 'umath.py', + 'umath.pyi', ] py.install_sources( diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index 449c3d2b4791..088de1073e7e 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -83,14 +83,15 @@ def _override___module__(): 'isfinite', 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', 'less', 'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', 'logical_or', - 'logical_xor', 'matmul', 'maximum', 'minimum', 'remainder', 'modf', - 'multiply', 'negative', 'nextafter', 'not_equal', 'positive', 'power', - 'rad2deg', 'radians', 'reciprocal', 'rint', 'sign', 'signbit', 'sin', - 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan', 'tanh', - 'trunc', 'vecdot', + 'logical_xor', 'matmul', 'matvec', 'maximum', 'minimum', 'remainder', + 'modf', 'multiply', 'negative', 'nextafter', 'not_equal', 'positive', + 'power', 'rad2deg', 'radians', 'reciprocal', 'rint', 'sign', 'signbit', + 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan', 'tanh', + 'trunc', 
'vecdot', 'vecmat', ]: ufunc = namespace_names[ufunc_name] ufunc.__module__ = "numpy" + ufunc.__qualname__ = ufunc_name _override___module__() diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 28cf5411645f..ea304c0789ab 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -49,6 +49,7 @@ from numpy import ( # type: ignore[attr-defined] signedinteger, floating, complexfloating, + _AnyShapeType, _OrderKACF, _OrderCF, _CastingKind, @@ -191,8 +192,6 @@ __all__ = [ "zeros", ] -_T_co = TypeVar("_T_co", covariant=True) -_T_contra = TypeVar("_T_contra", contravariant=True) _SCT = TypeVar("_SCT", bound=generic) _DType = TypeVar("_DType", bound=np.dtype[Any]) _ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any]) @@ -206,10 +205,9 @@ _IDType = TypeVar("_IDType") _Nin = TypeVar("_Nin", bound=int) _Nout = TypeVar("_Nout", bound=int) -_SizeType = TypeVar("_SizeType", bound=int) -_ShapeType = TypeVar("_ShapeType", bound=tuple[int, ...]) -_1DArray: TypeAlias = ndarray[tuple[_SizeType], dtype[_SCT]] -_Array: TypeAlias = ndarray[_ShapeType, dtype[_SCT]] +_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) +_Array: TypeAlias = ndarray[_ShapeT, dtype[_SCT]] +_Array1D: TypeAlias = ndarray[tuple[int], dtype[_SCT]] # Valid time units _UnitKind: TypeAlias = L[ @@ -250,70 +248,78 @@ class _ConstructorEmpty(Protocol): # 1-D shape @overload def __call__( - self, /, - shape: _SizeType, + self, + /, + shape: SupportsIndex, dtype: None = ..., order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], - ) -> _Array[tuple[_SizeType], float64]: ... + ) -> _Array1D[float64]: ... @overload def __call__( - self, /, - shape: _SizeType, + self, + /, + shape: SupportsIndex, dtype: _DType | _SupportsDType[_DType], order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], - ) -> ndarray[tuple[_SizeType], _DType]: ... + ) -> ndarray[tuple[int], _DType]: ... 
@overload def __call__( - self, /, - shape: _SizeType, + self, + /, + shape: SupportsIndex, dtype: type[_SCT], order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], - ) -> _Array[tuple[_SizeType], _SCT]: ... + ) -> _Array1D[_SCT]: ... @overload def __call__( - self, /, - shape: _SizeType, + self, + /, + shape: SupportsIndex, dtype: DTypeLike, order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], - ) -> _Array[tuple[_SizeType], Any]: ... + ) -> _Array1D[Any]: ... # known shape @overload def __call__( - self, /, - shape: _ShapeType, + self, + /, + shape: _AnyShapeType, dtype: None = ..., order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], - ) -> _Array[_ShapeType, float64]: ... + ) -> _Array[_AnyShapeType, float64]: ... @overload def __call__( - self, /, - shape: _ShapeType, + self, + /, + shape: _AnyShapeType, dtype: _DType | _SupportsDType[_DType], order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], - ) -> ndarray[_ShapeType, _DType]: ... + ) -> ndarray[_AnyShapeType, _DType]: ... @overload def __call__( - self, /, - shape: _ShapeType, + self, + /, + shape: _AnyShapeType, dtype: type[_SCT], order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], - ) -> _Array[_ShapeType, _SCT]: ... + ) -> _Array[_AnyShapeType, _SCT]: ... @overload def __call__( - self, /, - shape: _ShapeType, + self, + /, + shape: _AnyShapeType, dtype: DTypeLike, order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], - ) -> _Array[_ShapeType, Any]: ... + ) -> _Array[_AnyShapeType, Any]: ... # unknown shape @overload @@ -349,7 +355,8 @@ class _ConstructorEmpty(Protocol): **kwargs: Unpack[_KwargsEmpty], ) -> NDArray[Any]: ... -error: Final = Exception +# using `Final` or `TypeAlias` will break stubtest +error = Exception # from ._multiarray_umath ITEM_HASOBJECT: Final[L[1]] @@ -1036,7 +1043,7 @@ def arange( # type: ignore[misc] dtype: None = ..., device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> _1DArray[int, signedinteger[Any]]: ... +) -> _Array1D[signedinteger]: ... 
@overload def arange( # type: ignore[misc] start: _IntLike_co, @@ -1046,7 +1053,7 @@ def arange( # type: ignore[misc] *, device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> _1DArray[int, signedinteger[Any]]: ... +) -> _Array1D[signedinteger]: ... @overload def arange( # type: ignore[misc] stop: _FloatLike_co, @@ -1054,7 +1061,7 @@ def arange( # type: ignore[misc] dtype: None = ..., device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> _1DArray[int, floating[Any]]: ... +) -> _Array1D[floating]: ... @overload def arange( # type: ignore[misc] start: _FloatLike_co, @@ -1064,7 +1071,7 @@ def arange( # type: ignore[misc] *, device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> _1DArray[int, floating[Any]]: ... +) -> _Array1D[floating]: ... @overload def arange( stop: _TD64Like_co, @@ -1072,7 +1079,7 @@ def arange( dtype: None = ..., device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> _1DArray[int, timedelta64]: ... +) -> _Array1D[timedelta64]: ... @overload def arange( start: _TD64Like_co, @@ -1082,7 +1089,7 @@ def arange( *, device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> _1DArray[int, timedelta64]: ... +) -> _Array1D[timedelta64]: ... @overload def arange( # both start and stop must always be specified for datetime64 start: datetime64, @@ -1092,7 +1099,7 @@ def arange( # both start and stop must always be specified for datetime64 *, device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> _1DArray[int, datetime64]: ... +) -> _Array1D[datetime64]: ... @overload def arange( stop: Any, @@ -1100,7 +1107,7 @@ def arange( dtype: _DTypeLike[_SCT], device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> _1DArray[int, _SCT]: ... +) -> _Array1D[_SCT]: ... 
@overload def arange( start: Any, @@ -1110,7 +1117,7 @@ def arange( *, device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> _1DArray[int, _SCT]: ... +) -> _Array1D[_SCT]: ... @overload def arange( stop: Any, /, @@ -1118,7 +1125,7 @@ def arange( dtype: DTypeLike, device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> _1DArray[int, Any]: ... +) -> _Array1D[Any]: ... @overload def arange( start: Any, @@ -1128,7 +1135,7 @@ def arange( *, device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> _1DArray[int, Any]: ... +) -> _Array1D[Any]: ... def datetime_data( dtype: str | _DTypeLike[datetime64] | _DTypeLike[timedelta64], /, diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index 41c9873877e0..23e8a95878bb 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -44,9 +44,17 @@ from numpy import ( float64, timedelta64, object_, + _AnyShapeType, _OrderKACF, _OrderCF, ) +from .fromnumeric import ( + all as all, + any as any, + argpartition as argpartition, + matrix_transpose as matrix_transpose, + mean as mean, +) from .multiarray import ( # re-exports arange, @@ -102,6 +110,7 @@ from numpy._typing import ( _ArrayLikeTD64_co, _ArrayLikeObject_co, _ArrayLikeUnknown, + _NestedSequence, ) __all__ = [ @@ -183,7 +192,6 @@ _T = TypeVar("_T") _SCT = TypeVar("_SCT", bound=generic) _DType = TypeVar("_DType", bound=np.dtype[Any]) _ArrayType = TypeVar("_ArrayType", bound=np.ndarray[Any, Any]) -_SizeType = TypeVar("_SizeType", bound=int) _ShapeType = TypeVar("_ShapeType", bound=tuple[int, ...]) _CorrelateMode: TypeAlias = L["valid", "same", "full"] @@ -296,69 +304,69 @@ def ones_like( # 1-D shape @overload def full( - shape: _SizeType, + shape: SupportsIndex, fill_value: _SCT, dtype: None = ..., order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], -) -> _Array[tuple[_SizeType], _SCT]: ... +) -> _Array[tuple[int], _SCT]: ... 
@overload def full( - shape: _SizeType, + shape: SupportsIndex, fill_value: Any, dtype: _DType | _SupportsDType[_DType], order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], -) -> np.ndarray[tuple[_SizeType], _DType]: ... +) -> np.ndarray[tuple[int], _DType]: ... @overload def full( - shape: _SizeType, + shape: SupportsIndex, fill_value: Any, dtype: type[_SCT], order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], -) -> _Array[tuple[_SizeType], _SCT]: ... +) -> _Array[tuple[int], _SCT]: ... @overload def full( - shape: _SizeType, + shape: SupportsIndex, fill_value: Any, dtype: None | DTypeLike = ..., order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], -) -> _Array[tuple[_SizeType], Any]: ... +) -> _Array[tuple[int], Any]: ... # known shape @overload def full( - shape: _ShapeType, + shape: _AnyShapeType, fill_value: _SCT, dtype: None = ..., order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], -) -> _Array[_ShapeType, _SCT]: ... +) -> _Array[_AnyShapeType, _SCT]: ... @overload def full( - shape: _ShapeType, + shape: _AnyShapeType, fill_value: Any, dtype: _DType | _SupportsDType[_DType], order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], -) -> np.ndarray[_ShapeType, _DType]: ... +) -> np.ndarray[_AnyShapeType, _DType]: ... @overload def full( - shape: _ShapeType, + shape: _AnyShapeType, fill_value: Any, dtype: type[_SCT], order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], -) -> _Array[_ShapeType, _SCT]: ... +) -> _Array[_AnyShapeType, _SCT]: ... @overload def full( - shape: _ShapeType, + shape: _AnyShapeType, fill_value: Any, dtype: None | DTypeLike = ..., order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], -) -> _Array[_ShapeType, Any]: ... +) -> _Array[_AnyShapeType, Any]: ... # unknown shape @overload def full( @@ -449,21 +457,19 @@ def full_like( device: None | L["cpu"] = ..., ) -> NDArray[Any]: ... +# @overload -def count_nonzero( - a: ArrayLike, - axis: None = ..., - *, - keepdims: L[False] = ..., -) -> int: ... 
+def count_nonzero(a: ArrayLike, axis: None = None, *, keepdims: L[False] = False) -> int: ... +@overload +def count_nonzero(a: _ScalarLike_co, axis: _ShapeLike | None = None, *, keepdims: L[True]) -> np.intp: ... @overload def count_nonzero( - a: ArrayLike, - axis: _ShapeLike = ..., - *, - keepdims: bool = ..., -) -> Any: ... # TODO: np.intp or ndarray[np.intp] + a: NDArray[Any] | _NestedSequence[ArrayLike], axis: _ShapeLike | None = None, *, keepdims: L[True] +) -> NDArray[np.intp]: ... +@overload +def count_nonzero(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: bool = False) -> Any: ... +# def isfortran(a: NDArray[Any] | generic) -> bool: ... def argwhere(a: ArrayLike) -> NDArray[intp]: ... @@ -699,8 +705,8 @@ def moveaxis( @overload def cross( - x1: _ArrayLikeUnknown, - x2: _ArrayLikeUnknown, + a: _ArrayLikeUnknown, + b: _ArrayLikeUnknown, axisa: int = ..., axisb: int = ..., axisc: int = ..., @@ -708,8 +714,8 @@ def cross( ) -> NDArray[Any]: ... @overload def cross( - x1: _ArrayLikeBool_co, - x2: _ArrayLikeBool_co, + a: _ArrayLikeBool_co, + b: _ArrayLikeBool_co, axisa: int = ..., axisb: int = ..., axisc: int = ..., @@ -717,8 +723,8 @@ def cross( ) -> NoReturn: ... @overload def cross( - x1: _ArrayLikeUInt_co, - x2: _ArrayLikeUInt_co, + a: _ArrayLikeUInt_co, + b: _ArrayLikeUInt_co, axisa: int = ..., axisb: int = ..., axisc: int = ..., @@ -726,8 +732,8 @@ def cross( ) -> NDArray[unsignedinteger[Any]]: ... @overload def cross( - x1: _ArrayLikeInt_co, - x2: _ArrayLikeInt_co, + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, axisa: int = ..., axisb: int = ..., axisc: int = ..., @@ -735,8 +741,8 @@ def cross( ) -> NDArray[signedinteger[Any]]: ... @overload def cross( - x1: _ArrayLikeFloat_co, - x2: _ArrayLikeFloat_co, + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, axisa: int = ..., axisb: int = ..., axisc: int = ..., @@ -744,8 +750,8 @@ def cross( ) -> NDArray[floating[Any]]: ... 
@overload def cross( - x1: _ArrayLikeComplex_co, - x2: _ArrayLikeComplex_co, + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, axisa: int = ..., axisb: int = ..., axisc: int = ..., @@ -753,8 +759,8 @@ def cross( ) -> NDArray[complexfloating[Any, Any]]: ... @overload def cross( - x1: _ArrayLikeObject_co, - x2: _ArrayLikeObject_co, + a: _ArrayLikeObject_co, + b: _ArrayLikeObject_co, axisa: int = ..., axisb: int = ..., axisc: int = ..., @@ -872,15 +878,19 @@ def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ... @overload def astype( - x: NDArray[Any], + x: ndarray[_ShapeType, dtype[Any]], dtype: _DTypeLike[_SCT], + /, + *, copy: bool = ..., device: None | L["cpu"] = ..., -) -> NDArray[_SCT]: ... +) -> ndarray[_ShapeType, dtype[_SCT]]: ... @overload def astype( - x: NDArray[Any], + x: ndarray[_ShapeType, dtype[Any]], dtype: DTypeLike, + /, + *, copy: bool = ..., device: None | L["cpu"] = ..., -) -> NDArray[Any]: ... +) -> ndarray[_ShapeType, dtype[Any]]: ... diff --git a/numpy/_core/numerictypes.pyi b/numpy/_core/numerictypes.pyi index c2a7cb6261d4..ace5913f0f84 100644 --- a/numpy/_core/numerictypes.pyi +++ b/numpy/_core/numerictypes.pyi @@ -177,12 +177,9 @@ class _TypeCodes(TypedDict): Datetime: L['Mm'] All: L['?bhilqnpBHILQNPefdgFDGSUVOMm'] -def isdtype( - dtype: dtype[Any] | type[Any], - kind: DTypeLike | tuple[DTypeLike, ...], -) -> builtins.bool: ... +def isdtype(dtype: dtype[Any] | type[Any], kind: DTypeLike | tuple[DTypeLike, ...]) -> builtins.bool: ... -def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> bool: ... +def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> builtins.bool: ... 
typecodes: _TypeCodes ScalarType: tuple[ diff --git a/numpy/_core/overrides.py b/numpy/_core/overrides.py index 41f42ab26fae..cb466408cd39 100644 --- a/numpy/_core/overrides.py +++ b/numpy/_core/overrides.py @@ -19,7 +19,7 @@ compatible with that passed in via this argument.""" ) -def get_array_function_like_doc(public_api, docstring_template=None): +def get_array_function_like_doc(public_api, docstring_template=""): ARRAY_FUNCTIONS.add(public_api) docstring = public_api.__doc__ or docstring_template return docstring.replace("${ARRAY_FUNCTION_LIKE}", array_function_like_doc) diff --git a/numpy/_core/overrides.pyi b/numpy/_core/overrides.pyi new file mode 100644 index 000000000000..9babbcc26a0b --- /dev/null +++ b/numpy/_core/overrides.pyi @@ -0,0 +1,50 @@ +from collections.abc import Callable, Iterable +from typing import Any, Final, NamedTuple + +from typing_extensions import ParamSpec, TypeVar + +from numpy._typing import _SupportsArrayFunc + +_T = TypeVar("_T") +_Tss = ParamSpec("_Tss") +_FuncT = TypeVar("_FuncT", bound=Callable[..., object]) + +### + +ARRAY_FUNCTIONS: set[Callable[..., Any]] = ... +array_function_like_doc: Final[str] = ... + +class ArgSpec(NamedTuple): + args: list[str] + varargs: str | None + keywords: str | None + defaults: tuple[Any, ...] + +def get_array_function_like_doc(public_api: Callable[..., Any], docstring_template: str = "") -> str: ... +def finalize_array_function_like(public_api: _FuncT) -> _FuncT: ... + +# +def verify_matching_signatures( + implementation: Callable[_Tss, object], + dispatcher: Callable[_Tss, Iterable[_SupportsArrayFunc]], +) -> None: ... + +# NOTE: This actually returns a `_ArrayFunctionDispatcher` callable wrapper object, with +# the original wrapped callable stored in the `._implementation` attribute. It checks +# for any `__array_function__` of the values of specific arguments that the dispatcher +# specifies. 
Since the dispatcher only returns an iterable of passed array-like args, +# this overridable behaviour is impossible to annotate. +def array_function_dispatch( + dispatcher: Callable[_Tss, Iterable[_SupportsArrayFunc]] | None = None, + module: str | None = None, + verify: bool = True, + docs_from_dispatcher: bool = False, +) -> Callable[[_FuncT], _FuncT]: ... + +# +def array_function_from_dispatcher( + implementation: Callable[_Tss, _T], + module: str | None = None, + verify: bool = True, + docs_from_dispatcher: bool = True, +) -> Callable[[Callable[_Tss, Iterable[_SupportsArrayFunc]]], Callable[_Tss, _T]]: ... diff --git a/numpy/_core/printoptions.pyi b/numpy/_core/printoptions.pyi new file mode 100644 index 000000000000..bd7c7b40692d --- /dev/null +++ b/numpy/_core/printoptions.pyi @@ -0,0 +1,28 @@ +from collections.abc import Callable +from contextvars import ContextVar +from typing import Any, Final, TypedDict + +from .arrayprint import _FormatDict + +__all__ = ["format_options"] + +### + +class _FormatOptionsDict(TypedDict): + edgeitems: int + threshold: int + floatmode: str + precision: int + suppress: bool + linewidth: int + nanstr: str + infstr: str + sign: str + formatter: _FormatDict | None + legacy: int + override_repr: Callable[[Any], str] | None + +### + +default_format_options_dict: Final[_FormatOptionsDict] = ... 
+format_options: ContextVar[_FormatOptionsDict] diff --git a/numpy/_core/records.pyi b/numpy/_core/records.pyi index ef60803ffeb4..b4ca5ff0e3bf 100644 --- a/numpy/_core/records.pyi +++ b/numpy/_core/records.pyi @@ -1,56 +1,33 @@ -from _typeshed import StrOrBytesPath -from collections.abc import Sequence, Iterable -from types import EllipsisType -from typing import ( - Any, - TypeAlias, - TypeVar, - overload, - Protocol, - SupportsIndex, - Literal, - type_check_only -) +# ruff: noqa: ANN401 +# pyright: reportSelfClsParameterName=false +from collections.abc import Iterable, Sequence +from typing import Any, ClassVar, Literal, Protocol, SupportsIndex, TypeAlias, overload, type_check_only -from numpy import ( - ndarray, - dtype, - generic, - void, - _ByteOrder, - _SupportsBuffer, - _OrderKACF, -) +from _typeshed import StrOrBytesPath +from typing_extensions import TypeVar -from numpy._typing import ( - ArrayLike, - DTypeLike, - NDArray, - _Shape, - _ShapeLike, - _ArrayLikeInt_co, - _ArrayLikeVoid_co, - _NestedSequence, -) +import numpy as np +from numpy import _ByteOrder, _OrderKACF, _SupportsBuffer +from numpy._typing import ArrayLike, DTypeLike, NDArray, _ArrayLikeVoid_co, _NestedSequence, _ShapeLike __all__ = [ - "record", - "recarray", + "array", + "find_duplicate", "format_parser", "fromarrays", + "fromfile", "fromrecords", "fromstring", - "fromfile", - "array", - "find_duplicate", + "recarray", + "record", ] _T = TypeVar("_T") -_SCT = TypeVar("_SCT", bound=generic) -_DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True) +_SCT = TypeVar("_SCT", bound=np.generic) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype[Any], covariant=True) _ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], covariant=True) -_RecArray: TypeAlias = recarray[Any, dtype[_SCT]] +_RecArray: TypeAlias = recarray[Any, np.dtype[_SCT]] @type_check_only class _SupportsReadInto(Protocol): @@ -58,7 +35,10 @@ class _SupportsReadInto(Protocol): def tell(self, /) -> int: ... 
def readinto(self, buffer: memoryview, /) -> int: ... -class record(void): +### + +# exported in `numpy.rec` +class record(np.void): def __getattribute__(self, attr: str) -> Any: ... def __setattr__(self, attr: str, val: ArrayLike) -> None: ... def pprint(self) -> str: ... @@ -67,281 +47,269 @@ class record(void): @overload def __getitem__(self, key: list[str]) -> record: ... -class recarray(ndarray[_ShapeT_co, _DType_co]): - # NOTE: While not strictly mandatory, we're demanding here that arguments - # for the `format_parser`- and `dtype`-based dtype constructors are - # mutually exclusive +# exported in `numpy.rec` +class recarray(np.ndarray[_ShapeT_co, _DTypeT_co]): + __name__: ClassVar[Literal["record"]] = "record" + __module__: Literal["numpy"] = "numpy" @overload def __new__( subtype, shape: _ShapeLike, - dtype: None = ..., - buf: None | _SupportsBuffer = ..., - offset: SupportsIndex = ..., - strides: None | _ShapeLike = ..., + dtype: None = None, + buf: _SupportsBuffer | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, *, formats: DTypeLike, - names: None | str | Sequence[str] = ..., - titles: None | str | Sequence[str] = ..., - byteorder: None | _ByteOrder = ..., - aligned: bool = ..., - order: _OrderKACF = ..., - ) -> recarray[Any, dtype[record]]: ... + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + byteorder: _ByteOrder | None = None, + aligned: bool = False, + order: _OrderKACF = "C", + ) -> _RecArray[record]: ... @overload def __new__( subtype, shape: _ShapeLike, dtype: DTypeLike, - buf: None | _SupportsBuffer = ..., - offset: SupportsIndex = ..., - strides: None | _ShapeLike = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - byteorder: None = ..., - aligned: Literal[False] = ..., - order: _OrderKACF = ..., - ) -> recarray[Any, dtype[Any]]: ... - def __array_finalize__(self, obj: object) -> None: ... - def __getattribute__(self, attr: str) -> Any: ... 
- def __setattr__(self, attr: str, val: ArrayLike) -> None: ... - @overload - def __getitem__(self, indx: ( - SupportsIndex - | _ArrayLikeInt_co - | tuple[SupportsIndex | _ArrayLikeInt_co, ...] - )) -> Any: ... - @overload - def __getitem__(self: recarray[Any, dtype[void]], indx: ( - None - | slice - | EllipsisType - | SupportsIndex - | _ArrayLikeInt_co - | tuple[None | slice | EllipsisType | _ArrayLikeInt_co | SupportsIndex, ...] - )) -> recarray[_Shape, _DType_co]: ... - @overload - def __getitem__(self, indx: ( - None - | slice - | EllipsisType - | SupportsIndex - | _ArrayLikeInt_co - | tuple[None | slice | EllipsisType | _ArrayLikeInt_co | SupportsIndex, ...] - )) -> ndarray[_Shape, _DType_co]: ... - @overload - def __getitem__(self, indx: str) -> NDArray[Any]: ... - @overload - def __getitem__(self, indx: list[str]) -> recarray[_ShapeT_co, dtype[record]]: ... + buf: _SupportsBuffer | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + formats: None = None, + names: None = None, + titles: None = None, + byteorder: None = None, + aligned: Literal[False] = False, + order: _OrderKACF = "C", + ) -> _RecArray[Any]: ... + def __array_finalize__(self, /, obj: object) -> None: ... + def __getattribute__(self, attr: str, /) -> Any: ... + def __setattr__(self, attr: str, val: ArrayLike, /) -> None: ... + + # @overload - def field(self, attr: int | str, val: None = ...) -> Any: ... + def field(self, /, attr: int | str, val: ArrayLike) -> None: ... @overload - def field(self, attr: int | str, val: ArrayLike) -> None: ... + def field(self, /, attr: int | str, val: None = None) -> Any: ... 
+# exported in `numpy.rec` class format_parser: - dtype: dtype[void] + dtype: np.dtype[np.void] def __init__( self, + /, formats: DTypeLike, - names: None | str | Sequence[str], - titles: None | str | Sequence[str], - aligned: bool = ..., - byteorder: None | _ByteOrder = ..., + names: str | Sequence[str] | None, + titles: str | Sequence[str] | None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, ) -> None: ... +# exported in `numpy.rec` @overload def fromarrays( arrayList: Iterable[ArrayLike], - dtype: DTypeLike = ..., - shape: None | _ShapeLike = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - aligned: bool = ..., - byteorder: None = ..., + dtype: DTypeLike | None = None, + shape: _ShapeLike | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, ) -> _RecArray[Any]: ... @overload def fromarrays( arrayList: Iterable[ArrayLike], - dtype: None = ..., - shape: None | _ShapeLike = ..., + dtype: None = None, + shape: _ShapeLike | None = None, *, formats: DTypeLike, - names: None | str | Sequence[str] = ..., - titles: None | str | Sequence[str] = ..., - aligned: bool = ..., - byteorder: None | _ByteOrder = ..., + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, ) -> _RecArray[record]: ... @overload def fromrecords( - recList: _ArrayLikeVoid_co | tuple[Any, ...] | _NestedSequence[tuple[Any, ...]], - dtype: DTypeLike = ..., - shape: None | _ShapeLike = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - aligned: bool = ..., - byteorder: None = ..., + recList: _ArrayLikeVoid_co | tuple[object, ...] 
| _NestedSequence[tuple[object, ...]], + dtype: DTypeLike | None = None, + shape: _ShapeLike | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, ) -> _RecArray[record]: ... @overload def fromrecords( - recList: _ArrayLikeVoid_co | tuple[Any, ...] | _NestedSequence[tuple[Any, ...]], - dtype: None = ..., - shape: None | _ShapeLike = ..., + recList: _ArrayLikeVoid_co | tuple[object, ...] | _NestedSequence[tuple[object, ...]], + dtype: None = None, + shape: _ShapeLike | None = None, *, - formats: DTypeLike = ..., - names: None | str | Sequence[str] = ..., - titles: None | str | Sequence[str] = ..., - aligned: bool = ..., - byteorder: None | _ByteOrder = ..., + formats: DTypeLike, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, ) -> _RecArray[record]: ... +# exported in `numpy.rec` @overload def fromstring( datastring: _SupportsBuffer, dtype: DTypeLike, - shape: None | _ShapeLike = ..., - offset: int = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - aligned: bool = ..., - byteorder: None = ..., + shape: _ShapeLike | None = None, + offset: int = 0, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, ) -> _RecArray[record]: ... @overload def fromstring( datastring: _SupportsBuffer, - dtype: None = ..., - shape: None | _ShapeLike = ..., - offset: int = ..., + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, *, formats: DTypeLike, - names: None | str | Sequence[str] = ..., - titles: None | str | Sequence[str] = ..., - aligned: bool = ..., - byteorder: None | _ByteOrder = ..., + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, ) -> _RecArray[record]: ... 
+# exported in `numpy.rec` @overload def fromfile( fd: StrOrBytesPath | _SupportsReadInto, dtype: DTypeLike, - shape: None | _ShapeLike = ..., - offset: int = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - aligned: bool = ..., - byteorder: None = ..., + shape: _ShapeLike | None = None, + offset: int = 0, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, ) -> _RecArray[Any]: ... @overload def fromfile( fd: StrOrBytesPath | _SupportsReadInto, - dtype: None = ..., - shape: None | _ShapeLike = ..., - offset: int = ..., + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, *, formats: DTypeLike, - names: None | str | Sequence[str] = ..., - titles: None | str | Sequence[str] = ..., - aligned: bool = ..., - byteorder: None | _ByteOrder = ..., + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, ) -> _RecArray[record]: ... +# exported in `numpy.rec` @overload def array( obj: _SCT | NDArray[_SCT], - dtype: None = ..., - shape: None | _ShapeLike = ..., - offset: int = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - aligned: bool = ..., - byteorder: None = ..., - copy: bool = ..., + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, + strides: tuple[int, ...] | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, + copy: bool = True, ) -> _RecArray[_SCT]: ... @overload def array( obj: ArrayLike, dtype: DTypeLike, - shape: None | _ShapeLike = ..., - offset: int = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - aligned: bool = ..., - byteorder: None = ..., - copy: bool = ..., + shape: _ShapeLike | None = None, + offset: int = 0, + strides: tuple[int, ...] 
| None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, + copy: bool = True, ) -> _RecArray[Any]: ... @overload def array( obj: ArrayLike, - dtype: None = ..., - shape: None | _ShapeLike = ..., - offset: int = ..., + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, + strides: tuple[int, ...] | None = None, *, formats: DTypeLike, - names: None | str | Sequence[str] = ..., - titles: None | str | Sequence[str] = ..., - aligned: bool = ..., - byteorder: None | _ByteOrder = ..., - copy: bool = ..., + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + copy: bool = True, ) -> _RecArray[record]: ... @overload def array( obj: None, dtype: DTypeLike, shape: _ShapeLike, - offset: int = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - aligned: bool = ..., - byteorder: None = ..., - copy: bool = ..., + offset: int = 0, + strides: tuple[int, ...] | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, + copy: bool = True, ) -> _RecArray[Any]: ... @overload def array( obj: None, - dtype: None = ..., + dtype: None = None, *, shape: _ShapeLike, - offset: int = ..., + offset: int = 0, + strides: tuple[int, ...] | None = None, formats: DTypeLike, - names: None | str | Sequence[str] = ..., - titles: None | str | Sequence[str] = ..., - aligned: bool = ..., - byteorder: None | _ByteOrder = ..., - copy: bool = ..., + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + copy: bool = True, ) -> _RecArray[record]: ... 
@overload def array( obj: _SupportsReadInto, dtype: DTypeLike, - shape: None | _ShapeLike = ..., - offset: int = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - aligned: bool = ..., - byteorder: None = ..., - copy: bool = ..., + shape: _ShapeLike | None = None, + offset: int = 0, + strides: tuple[int, ...] | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, + copy: bool = True, ) -> _RecArray[Any]: ... @overload def array( obj: _SupportsReadInto, - dtype: None = ..., - shape: None | _ShapeLike = ..., - offset: int = ..., + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, + strides: tuple[int, ...] | None = None, *, formats: DTypeLike, - names: None | str | Sequence[str] = ..., - titles: None | str | Sequence[str] = ..., - aligned: bool = ..., - byteorder: None | _ByteOrder = ..., - copy: bool = ..., + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + copy: bool = True, ) -> _RecArray[record]: ... +# exported in `numpy.rec` def find_duplicate(list: Iterable[_T]) -> list[_T]: ... 
diff --git a/numpy/_core/shape_base.pyi b/numpy/_core/shape_base.pyi index 0dadded9423a..decb7be48f9e 100644 --- a/numpy/_core/shape_base.pyi +++ b/numpy/_core/shape_base.pyi @@ -1,14 +1,8 @@ from collections.abc import Sequence -from typing import TypeVar, overload, Any, SupportsIndex +from typing import Any, SupportsIndex, TypeVar, overload -from numpy import generic, _CastingKind -from numpy._typing import ( - NDArray, - ArrayLike, - DTypeLike, - _ArrayLike, - _DTypeLike, -) +from numpy import _CastingKind, generic +from numpy._typing import ArrayLike, DTypeLike, NDArray, _ArrayLike, _DTypeLike __all__ = [ "atleast_1d", @@ -22,29 +16,54 @@ __all__ = [ ] _SCT = TypeVar("_SCT", bound=generic) -_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) +_SCT1 = TypeVar("_SCT1", bound=generic) +_SCT2 = TypeVar("_SCT2", bound=generic) +_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) +### + +@overload +def atleast_1d(a0: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... +@overload +def atleast_1d(a0: _ArrayLike[_SCT1], a1: _ArrayLike[_SCT2], /) -> tuple[NDArray[_SCT1], NDArray[_SCT2]]: ... @overload -def atleast_1d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... +def atleast_1d(a0: _ArrayLike[_SCT], a1: _ArrayLike[_SCT], /, *arys: _ArrayLike[_SCT]) -> tuple[NDArray[_SCT], ...]: ... @overload -def atleast_1d(arys: ArrayLike, /) -> NDArray[Any]: ... +def atleast_1d(a0: ArrayLike, /) -> NDArray[Any]: ... @overload -def atleast_1d(*arys: ArrayLike) -> tuple[NDArray[Any], ...]: ... +def atleast_1d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[Any]]: ... +@overload +def atleast_1d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... +# +@overload +def atleast_2d(a0: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... @overload -def atleast_2d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... +def atleast_2d(a0: _ArrayLike[_SCT1], a1: _ArrayLike[_SCT2], /) -> tuple[NDArray[_SCT1], NDArray[_SCT2]]: ... 
@overload -def atleast_2d(arys: ArrayLike, /) -> NDArray[Any]: ... +def atleast_2d(a0: _ArrayLike[_SCT], a1: _ArrayLike[_SCT], /, *arys: _ArrayLike[_SCT]) -> tuple[NDArray[_SCT], ...]: ... @overload -def atleast_2d(*arys: ArrayLike) -> tuple[NDArray[Any], ...]: ... +def atleast_2d(a0: ArrayLike, /) -> NDArray[Any]: ... +@overload +def atleast_2d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[Any]]: ... +@overload +def atleast_2d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... +# +@overload +def atleast_3d(a0: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... +@overload +def atleast_3d(a0: _ArrayLike[_SCT1], a1: _ArrayLike[_SCT2], /) -> tuple[NDArray[_SCT1], NDArray[_SCT2]]: ... @overload -def atleast_3d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... +def atleast_3d(a0: _ArrayLike[_SCT], a1: _ArrayLike[_SCT], /, *arys: _ArrayLike[_SCT]) -> tuple[NDArray[_SCT], ...]: ... @overload -def atleast_3d(arys: ArrayLike, /) -> NDArray[Any]: ... +def atleast_3d(a0: ArrayLike, /) -> NDArray[Any]: ... @overload -def atleast_3d(*arys: ArrayLike) -> tuple[NDArray[Any], ...]: ... +def atleast_3d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[Any]]: ... +@overload +def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... +# @overload def vstack( tup: Sequence[_ArrayLike[_SCT]], @@ -119,12 +138,21 @@ def stack( @overload def stack( arrays: Sequence[ArrayLike], - axis: SupportsIndex = ..., - out: _ArrayType = ..., + axis: SupportsIndex, + out: _ArrayT, *, - dtype: DTypeLike = ..., - casting: _CastingKind = ... -) -> _ArrayType: ... + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind", +) -> _ArrayT: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = 0, + *, + out: _ArrayT, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind", +) -> _ArrayT: ... 
@overload def unstack( diff --git a/numpy/_core/src/common/npy_atomic.h b/numpy/_core/src/common/npy_atomic.h index 910028dcde7c..f5b41d7068be 100644 --- a/numpy/_core/src/common/npy_atomic.h +++ b/numpy/_core/src/common/npy_atomic.h @@ -9,11 +9,18 @@ #include "numpy/npy_common.h" -#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L \ +#ifdef __cplusplus + extern "C++" { + #include + } + #define _NPY_USING_STD using namespace std + #define _Atomic(tp) atomic + #define STDC_ATOMICS +#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L \ && !defined(__STDC_NO_ATOMICS__) -// TODO: support C++ atomics as well if this header is ever needed in C++ #include #include + #define _NPY_USING_STD #define STDC_ATOMICS #elif _MSC_VER #include @@ -35,6 +42,7 @@ static inline npy_uint8 npy_atomic_load_uint8(const npy_uint8 *obj) { #ifdef STDC_ATOMICS + _NPY_USING_STD; return (npy_uint8)atomic_load((const _Atomic(uint8_t)*)obj); #elif defined(MSC_ATOMICS) #if defined(_M_X64) || defined(_M_IX86) @@ -50,6 +58,7 @@ npy_atomic_load_uint8(const npy_uint8 *obj) { static inline void* npy_atomic_load_ptr(const void *obj) { #ifdef STDC_ATOMICS + _NPY_USING_STD; return atomic_load((const _Atomic(void *)*)obj); #elif defined(MSC_ATOMICS) #if SIZEOF_VOID_P == 8 @@ -73,6 +82,7 @@ npy_atomic_load_ptr(const void *obj) { static inline void npy_atomic_store_uint8(npy_uint8 *obj, npy_uint8 value) { #ifdef STDC_ATOMICS + _NPY_USING_STD; atomic_store((_Atomic(uint8_t)*)obj, value); #elif defined(MSC_ATOMICS) _InterlockedExchange8((volatile char *)obj, (char)value); @@ -85,6 +95,7 @@ static inline void npy_atomic_store_ptr(void *obj, void *value) { #ifdef STDC_ATOMICS + _NPY_USING_STD; atomic_store((_Atomic(void *)*)obj, value); #elif defined(MSC_ATOMICS) _InterlockedExchangePointer((void * volatile *)obj, (void *)value); diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index 7c0a4c60294c..2c1e064afda5 100644 --- 
a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -749,34 +749,33 @@ npy__cpu_init_features_linux(void) #endif } #ifdef __arm__ + npy__cpu_have[NPY_CPU_FEATURE_NEON] = (hwcap & NPY__HWCAP_NEON) != 0; + if (npy__cpu_have[NPY_CPU_FEATURE_NEON]) { + npy__cpu_have[NPY_CPU_FEATURE_NEON_FP16] = (hwcap & NPY__HWCAP_HALF) != 0; + npy__cpu_have[NPY_CPU_FEATURE_NEON_VFPV4] = (hwcap & NPY__HWCAP_VFPv4) != 0; + } // Detect Arm8 (aarch32 state) if ((hwcap2 & NPY__HWCAP2_AES) || (hwcap2 & NPY__HWCAP2_SHA1) || (hwcap2 & NPY__HWCAP2_SHA2) || (hwcap2 & NPY__HWCAP2_PMULL) || (hwcap2 & NPY__HWCAP2_CRC32)) { - hwcap = hwcap2; + npy__cpu_have[NPY_CPU_FEATURE_ASIMD] = npy__cpu_have[NPY_CPU_FEATURE_NEON]; + } #else - if (1) - { - if (!(hwcap & (NPY__HWCAP_FP | NPY__HWCAP_ASIMD))) { - // Is this could happen? maybe disabled by kernel - // BTW this will break the baseline of AARCH64 - return 1; - } -#endif - npy__cpu_have[NPY_CPU_FEATURE_FPHP] = (hwcap & NPY__HWCAP_FPHP) != 0; - npy__cpu_have[NPY_CPU_FEATURE_ASIMDHP] = (hwcap & NPY__HWCAP_ASIMDHP) != 0; - npy__cpu_have[NPY_CPU_FEATURE_ASIMDDP] = (hwcap & NPY__HWCAP_ASIMDDP) != 0; - npy__cpu_have[NPY_CPU_FEATURE_ASIMDFHM] = (hwcap & NPY__HWCAP_ASIMDFHM) != 0; - npy__cpu_have[NPY_CPU_FEATURE_SVE] = (hwcap & NPY__HWCAP_SVE) != 0; - npy__cpu_init_features_arm8(); - } else { - npy__cpu_have[NPY_CPU_FEATURE_NEON] = (hwcap & NPY__HWCAP_NEON) != 0; - if (npy__cpu_have[NPY_CPU_FEATURE_NEON]) { - npy__cpu_have[NPY_CPU_FEATURE_NEON_FP16] = (hwcap & NPY__HWCAP_HALF) != 0; - npy__cpu_have[NPY_CPU_FEATURE_NEON_VFPV4] = (hwcap & NPY__HWCAP_VFPv4) != 0; - } + if (!(hwcap & (NPY__HWCAP_FP | NPY__HWCAP_ASIMD))) { + // Is this could happen? 
maybe disabled by kernel + // BTW this will break the baseline of AARCH64 + return 1; } + npy__cpu_init_features_arm8(); +#endif + npy__cpu_have[NPY_CPU_FEATURE_FPHP] = (hwcap & NPY__HWCAP_FPHP) != 0; + npy__cpu_have[NPY_CPU_FEATURE_ASIMDHP] = (hwcap & NPY__HWCAP_ASIMDHP) != 0; + npy__cpu_have[NPY_CPU_FEATURE_ASIMDDP] = (hwcap & NPY__HWCAP_ASIMDDP) != 0; + npy__cpu_have[NPY_CPU_FEATURE_ASIMDFHM] = (hwcap & NPY__HWCAP_ASIMDFHM) != 0; +#ifndef __arm__ + npy__cpu_have[NPY_CPU_FEATURE_SVE] = (hwcap & NPY__HWCAP_SVE) != 0; +#endif return 1; } #endif diff --git a/numpy/_core/src/common/npy_cpuinfo_parser.h b/numpy/_core/src/common/npy_cpuinfo_parser.h index 154c4245ba2b..30f2976d28b6 100644 --- a/numpy/_core/src/common/npy_cpuinfo_parser.h +++ b/numpy/_core/src/common/npy_cpuinfo_parser.h @@ -36,25 +36,43 @@ #define NPY__HWCAP 16 #define NPY__HWCAP2 26 -// arch/arm/include/uapi/asm/hwcap.h -#define NPY__HWCAP_HALF (1 << 1) -#define NPY__HWCAP_NEON (1 << 12) -#define NPY__HWCAP_VFPv3 (1 << 13) -#define NPY__HWCAP_VFPv4 (1 << 16) -#define NPY__HWCAP2_AES (1 << 0) -#define NPY__HWCAP2_PMULL (1 << 1) -#define NPY__HWCAP2_SHA1 (1 << 2) -#define NPY__HWCAP2_SHA2 (1 << 3) -#define NPY__HWCAP2_CRC32 (1 << 4) -// arch/arm64/include/uapi/asm/hwcap.h -#define NPY__HWCAP_FP (1 << 0) -#define NPY__HWCAP_ASIMD (1 << 1) -#define NPY__HWCAP_FPHP (1 << 9) -#define NPY__HWCAP_ASIMDHP (1 << 10) -#define NPY__HWCAP_ASIMDDP (1 << 20) -#define NPY__HWCAP_SVE (1 << 22) -#define NPY__HWCAP_ASIMDFHM (1 << 23) -/* +#ifdef __arm__ + // arch/arm/include/uapi/asm/hwcap.h + #define NPY__HWCAP_HALF (1 << 1) + #define NPY__HWCAP_NEON (1 << 12) + #define NPY__HWCAP_VFPv3 (1 << 13) + #define NPY__HWCAP_VFPv4 (1 << 16) + + #define NPY__HWCAP_FPHP (1 << 22) + #define NPY__HWCAP_ASIMDHP (1 << 23) + #define NPY__HWCAP_ASIMDDP (1 << 24) + #define NPY__HWCAP_ASIMDFHM (1 << 25) + + #define NPY__HWCAP2_AES (1 << 0) + #define NPY__HWCAP2_PMULL (1 << 1) + #define NPY__HWCAP2_SHA1 (1 << 2) + #define 
NPY__HWCAP2_SHA2 (1 << 3) + #define NPY__HWCAP2_CRC32 (1 << 4) +#else + // arch/arm64/include/uapi/asm/hwcap.h + #define NPY__HWCAP_FP (1 << 0) + #define NPY__HWCAP_ASIMD (1 << 1) + + #define NPY__HWCAP_FPHP (1 << 9) + #define NPY__HWCAP_ASIMDHP (1 << 10) + #define NPY__HWCAP_ASIMDDP (1 << 20) + #define NPY__HWCAP_ASIMDFHM (1 << 23) + + #define NPY__HWCAP_AES (1 << 3) + #define NPY__HWCAP_PMULL (1 << 4) + #define NPY__HWCAP_SHA1 (1 << 5) + #define NPY__HWCAP_SHA2 (1 << 6) + #define NPY__HWCAP_CRC32 (1 << 7) + #define NPY__HWCAP_SVE (1 << 22) +#endif + + +/* * Get the size of a file by reading it until the end. This is needed * because files under /proc do not always return a valid size when * using fseek(0, SEEK_END) + ftell(). Nor can they be mmap()-ed. @@ -87,7 +105,7 @@ get_file_size(const char* pathname) return result; } -/* +/* * Read the content of /proc/cpuinfo into a user-provided buffer. * Return the length of the data, or -1 on error. Does *not* * zero-terminate the content. Will not read more @@ -123,7 +141,7 @@ read_file(const char* pathname, char* buffer, size_t buffsize) return count; } -/* +/* * Extract the content of a the first occurrence of a given field in * the content of /proc/cpuinfo and return it as a heap-allocated * string that must be freed by the caller. @@ -182,7 +200,7 @@ extract_cpuinfo_field(const char* buffer, int buflen, const char* field) return result; } -/* +/* * Checks that a space-separated list of items contains one given 'item'. * Returns 1 if found, 0 otherwise. */ @@ -220,44 +238,51 @@ has_list_item(const char* list, const char* item) return 0; } -static void setHwcap(char* cpuFeatures, unsigned long* hwcap) { - *hwcap |= has_list_item(cpuFeatures, "neon") ? NPY__HWCAP_NEON : 0; - *hwcap |= has_list_item(cpuFeatures, "half") ? NPY__HWCAP_HALF : 0; - *hwcap |= has_list_item(cpuFeatures, "vfpv3") ? NPY__HWCAP_VFPv3 : 0; - *hwcap |= has_list_item(cpuFeatures, "vfpv4") ? 
NPY__HWCAP_VFPv4 : 0; - - *hwcap |= has_list_item(cpuFeatures, "asimd") ? NPY__HWCAP_ASIMD : 0; - *hwcap |= has_list_item(cpuFeatures, "fp") ? NPY__HWCAP_FP : 0; - *hwcap |= has_list_item(cpuFeatures, "fphp") ? NPY__HWCAP_FPHP : 0; - *hwcap |= has_list_item(cpuFeatures, "asimdhp") ? NPY__HWCAP_ASIMDHP : 0; - *hwcap |= has_list_item(cpuFeatures, "asimddp") ? NPY__HWCAP_ASIMDDP : 0; - *hwcap |= has_list_item(cpuFeatures, "asimdfhm") ? NPY__HWCAP_ASIMDFHM : 0; -} - static int get_feature_from_proc_cpuinfo(unsigned long *hwcap, unsigned long *hwcap2) { - char* cpuinfo = NULL; - int cpuinfo_len; - cpuinfo_len = get_file_size("/proc/cpuinfo"); + *hwcap = 0; + *hwcap2 = 0; + + int cpuinfo_len = get_file_size("/proc/cpuinfo"); if (cpuinfo_len < 0) { return 0; } - cpuinfo = malloc(cpuinfo_len); + char *cpuinfo = malloc(cpuinfo_len); if (cpuinfo == NULL) { return 0; } + cpuinfo_len = read_file("/proc/cpuinfo", cpuinfo, cpuinfo_len); - char* cpuFeatures = extract_cpuinfo_field(cpuinfo, cpuinfo_len, "Features"); - if(cpuFeatures == NULL) { + char *cpuFeatures = extract_cpuinfo_field(cpuinfo, cpuinfo_len, "Features"); + if (cpuFeatures == NULL) { + free(cpuinfo); return 0; } - setHwcap(cpuFeatures, hwcap); - *hwcap2 |= *hwcap; + *hwcap |= has_list_item(cpuFeatures, "fphp") ? NPY__HWCAP_FPHP : 0; + *hwcap |= has_list_item(cpuFeatures, "asimdhp") ? NPY__HWCAP_ASIMDHP : 0; + *hwcap |= has_list_item(cpuFeatures, "asimddp") ? NPY__HWCAP_ASIMDDP : 0; + *hwcap |= has_list_item(cpuFeatures, "asimdfhm") ? NPY__HWCAP_ASIMDFHM : 0; +#ifdef __arm__ + *hwcap |= has_list_item(cpuFeatures, "neon") ? NPY__HWCAP_NEON : 0; + *hwcap |= has_list_item(cpuFeatures, "half") ? NPY__HWCAP_HALF : 0; + *hwcap |= has_list_item(cpuFeatures, "vfpv3") ? NPY__HWCAP_VFPv3 : 0; + *hwcap |= has_list_item(cpuFeatures, "vfpv4") ? NPY__HWCAP_VFPv4 : 0; *hwcap2 |= has_list_item(cpuFeatures, "aes") ? NPY__HWCAP2_AES : 0; *hwcap2 |= has_list_item(cpuFeatures, "pmull") ? 
NPY__HWCAP2_PMULL : 0; *hwcap2 |= has_list_item(cpuFeatures, "sha1") ? NPY__HWCAP2_SHA1 : 0; *hwcap2 |= has_list_item(cpuFeatures, "sha2") ? NPY__HWCAP2_SHA2 : 0; *hwcap2 |= has_list_item(cpuFeatures, "crc32") ? NPY__HWCAP2_CRC32 : 0; +#else + *hwcap |= has_list_item(cpuFeatures, "asimd") ? NPY__HWCAP_ASIMD : 0; + *hwcap |= has_list_item(cpuFeatures, "fp") ? NPY__HWCAP_FP : 0; + *hwcap |= has_list_item(cpuFeatures, "aes") ? NPY__HWCAP_AES : 0; + *hwcap |= has_list_item(cpuFeatures, "pmull") ? NPY__HWCAP_PMULL : 0; + *hwcap |= has_list_item(cpuFeatures, "sha1") ? NPY__HWCAP_SHA1 : 0; + *hwcap |= has_list_item(cpuFeatures, "sha2") ? NPY__HWCAP_SHA2 : 0; + *hwcap |= has_list_item(cpuFeatures, "crc32") ? NPY__HWCAP_CRC32 : 0; +#endif + free(cpuinfo); + free(cpuFeatures); return 1; } #endif /* NUMPY_CORE_SRC_COMMON_NPY_CPUINFO_PARSER_H_ */ diff --git a/numpy/_core/src/common/npy_hashtable.c b/numpy/_core/src/common/npy_hashtable.cpp similarity index 92% rename from numpy/_core/src/common/npy_hashtable.c rename to numpy/_core/src/common/npy_hashtable.cpp index 596e62cf8354..a4244fae88cb 100644 --- a/numpy/_core/src/common/npy_hashtable.c +++ b/numpy/_core/src/common/npy_hashtable.cpp @@ -12,8 +12,12 @@ * case is likely desired. 
*/ +#include +#include + #include "templ_common.h" #include "npy_hashtable.h" +#include @@ -89,7 +93,7 @@ find_item(PyArrayIdentityHash const *tb, PyObject *const *key) NPY_NO_EXPORT PyArrayIdentityHash * PyArrayIdentityHash_New(int key_len) { - PyArrayIdentityHash *res = PyMem_Malloc(sizeof(PyArrayIdentityHash)); + PyArrayIdentityHash *res = (PyArrayIdentityHash *)PyMem_Malloc(sizeof(PyArrayIdentityHash)); if (res == NULL) { PyErr_NoMemory(); return NULL; @@ -100,12 +104,21 @@ PyArrayIdentityHash_New(int key_len) res->size = 4; /* Start with a size of 4 */ res->nelem = 0; - res->buckets = PyMem_Calloc(4 * (key_len + 1), sizeof(PyObject *)); + res->buckets = (PyObject **)PyMem_Calloc(4 * (key_len + 1), sizeof(PyObject *)); if (res->buckets == NULL) { PyErr_NoMemory(); PyMem_Free(res); return NULL; } + +#ifdef Py_GIL_DISABLED + res->mutex = new(std::nothrow) std::shared_mutex(); + if (res->mutex == nullptr) { + PyErr_NoMemory(); + PyMem_Free(res); + return NULL; + } +#endif return res; } @@ -114,6 +127,9 @@ NPY_NO_EXPORT void PyArrayIdentityHash_Dealloc(PyArrayIdentityHash *tb) { PyMem_Free(tb->buckets); +#ifdef Py_GIL_DISABLED + delete (std::shared_mutex *)tb->mutex; +#endif PyMem_Free(tb); } @@ -149,7 +165,7 @@ _resize_if_necessary(PyArrayIdentityHash *tb) if (npy_mul_sizes_with_overflow(&alloc_size, new_size, tb->key_len + 1)) { return -1; } - tb->buckets = PyMem_Calloc(alloc_size, sizeof(PyObject *)); + tb->buckets = (PyObject **)PyMem_Calloc(alloc_size, sizeof(PyObject *)); if (tb->buckets == NULL) { tb->buckets = old_table; PyErr_NoMemory(); diff --git a/numpy/_core/src/common/npy_hashtable.h b/numpy/_core/src/common/npy_hashtable.h index a4252da87aff..cd061ba6fa11 100644 --- a/numpy/_core/src/common/npy_hashtable.h +++ b/numpy/_core/src/common/npy_hashtable.h @@ -7,12 +7,19 @@ #include "numpy/ndarraytypes.h" +#ifdef __cplusplus +extern "C" { +#endif + typedef struct { int key_len; /* number of identities used */ /* Buckets stores: val1, key1[0], key1[1], ..., 
val2, key2[0], ... */ PyObject **buckets; npy_intp size; /* current size */ npy_intp nelem; /* number of elements */ +#ifdef Py_GIL_DISABLED + void *mutex; +#endif } PyArrayIdentityHash; @@ -29,4 +36,8 @@ PyArrayIdentityHash_New(int key_len); NPY_NO_EXPORT void PyArrayIdentityHash_Dealloc(PyArrayIdentityHash *tb); +#ifdef __cplusplus +} +#endif + #endif /* NUMPY_CORE_SRC_COMMON_NPY_NPY_HASHTABLE_H_ */ diff --git a/numpy/_core/src/common/numpyos.h b/numpy/_core/src/common/numpyos.h index fac82f7d438c..8fbecb122577 100644 --- a/numpy/_core/src/common/numpyos.h +++ b/numpy/_core/src/common/numpyos.h @@ -51,7 +51,7 @@ NPY_NO_EXPORT int NumPyOS_ascii_isupper(char c); NPY_NO_EXPORT int -NumPyOS_ascii_tolower(char c); +NumPyOS_ascii_tolower(int c); /* Convert a string to an int in an arbitrary base */ NPY_NO_EXPORT npy_longlong diff --git a/numpy/_core/src/common/simd/neon/math.h b/numpy/_core/src/common/simd/neon/math.h index 58d14809fbfe..76c5b58be788 100644 --- a/numpy/_core/src/common/simd/neon/math.h +++ b/numpy/_core/src/common/simd/neon/math.h @@ -28,11 +28,13 @@ NPY_FINLINE npyv_f32 npyv_square_f32(npyv_f32 a) // Based on ARM doc, see https://developer.arm.com/documentation/dui0204/j/CIHDIACI NPY_FINLINE npyv_f32 npyv_sqrt_f32(npyv_f32 a) { + const npyv_f32 one = vdupq_n_f32(1.0f); const npyv_f32 zero = vdupq_n_f32(0.0f); const npyv_u32 pinf = vdupq_n_u32(0x7f800000); npyv_u32 is_zero = vceqq_f32(a, zero), is_inf = vceqq_u32(vreinterpretq_u32_f32(a), pinf); - // guard against floating-point division-by-zero error - npyv_f32 guard_byz = vbslq_f32(is_zero, vreinterpretq_f32_u32(pinf), a); + npyv_u32 is_special = vorrq_u32(is_zero, is_inf); + // guard against division-by-zero and infinity input to vrsqrte to avoid invalid fp error + npyv_f32 guard_byz = vbslq_f32(is_special, one, a); // estimate to (1/√a) npyv_f32 rsqrte = vrsqrteq_f32(guard_byz); /** @@ -47,10 +49,8 @@ NPY_FINLINE npyv_f32 npyv_square_f32(npyv_f32 a) rsqrte = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a, 
rsqrte), rsqrte), rsqrte); // a * (1/√a) npyv_f32 sqrt = vmulq_f32(a, rsqrte); - // return zero if the a is zero - // - return zero if a is zero. - // - return positive infinity if a is positive infinity - return vbslq_f32(vorrq_u32(is_zero, is_inf), a, sqrt); + // Handle special cases: return a for zeros and positive infinities + return vbslq_f32(is_special, a, sqrt); } #endif // NPY_SIMD_F64 diff --git a/numpy/_core/src/common/simd/vec/operators.h b/numpy/_core/src/common/simd/vec/operators.h index 50dac20f6d7d..3a402689d02f 100644 --- a/numpy/_core/src/common/simd/vec/operators.h +++ b/numpy/_core/src/common/simd/vec/operators.h @@ -44,6 +44,10 @@ /*************************** * Logical ***************************/ +#define NPYV_IMPL_VEC_BIN_WRAP(INTRIN, SFX) \ + NPY_FINLINE npyv_##SFX npyv_##INTRIN##_##SFX(npyv_##SFX a, npyv_##SFX b) \ + { return vec_##INTRIN(a, b); } + #define NPYV_IMPL_VEC_BIN_CAST(INTRIN, SFX, CAST) \ NPY_FINLINE npyv_##SFX npyv_##INTRIN##_##SFX(npyv_##SFX a, npyv_##SFX b) \ { return (npyv_##SFX)vec_##INTRIN((CAST)a, (CAST)b); } @@ -54,6 +58,15 @@ #else #define NPYV_IMPL_VEC_BIN_B64(INTRIN) NPYV_IMPL_VEC_BIN_CAST(INTRIN, b64, npyv_b64) #endif + +// Up to clang __VEC__ 10305 logical intrinsics do not support f32 or f64 +#if defined(NPY_HAVE_VX) && defined(__clang__) && __VEC__ < 10305 + #define NPYV_IMPL_VEC_BIN_F32(INTRIN) NPYV_IMPL_VEC_BIN_CAST(INTRIN, f32, npyv_u32) + #define NPYV_IMPL_VEC_BIN_F64(INTRIN) NPYV_IMPL_VEC_BIN_CAST(INTRIN, f64, npyv_u64) +#else + #define NPYV_IMPL_VEC_BIN_F32(INTRIN) NPYV_IMPL_VEC_BIN_WRAP(INTRIN, f32) + #define NPYV_IMPL_VEC_BIN_F64(INTRIN) NPYV_IMPL_VEC_BIN_WRAP(INTRIN, f64) +#endif // AND #define npyv_and_u8 vec_and #define npyv_and_s8 vec_and @@ -64,9 +77,9 @@ #define npyv_and_u64 vec_and #define npyv_and_s64 vec_and #if NPY_SIMD_F32 - #define npyv_and_f32 vec_and + NPYV_IMPL_VEC_BIN_F32(and) #endif -#define npyv_and_f64 vec_and +NPYV_IMPL_VEC_BIN_F64(and) #define npyv_and_b8 vec_and #define npyv_and_b16 
vec_and #define npyv_and_b32 vec_and @@ -82,9 +95,9 @@ NPYV_IMPL_VEC_BIN_B64(and) #define npyv_or_u64 vec_or #define npyv_or_s64 vec_or #if NPY_SIMD_F32 - #define npyv_or_f32 vec_or + NPYV_IMPL_VEC_BIN_F32(or) #endif -#define npyv_or_f64 vec_or +NPYV_IMPL_VEC_BIN_F64(or) #define npyv_or_b8 vec_or #define npyv_or_b16 vec_or #define npyv_or_b32 vec_or @@ -100,9 +113,9 @@ NPYV_IMPL_VEC_BIN_B64(or) #define npyv_xor_u64 vec_xor #define npyv_xor_s64 vec_xor #if NPY_SIMD_F32 - #define npyv_xor_f32 vec_xor + NPYV_IMPL_VEC_BIN_F32(xor) #endif -#define npyv_xor_f64 vec_xor +NPYV_IMPL_VEC_BIN_F64(xor) #define npyv_xor_b8 vec_xor #define npyv_xor_b16 vec_xor #define npyv_xor_b32 vec_xor diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway index a97b5d371d69..0b696633f9ad 160000 --- a/numpy/_core/src/highway +++ b/numpy/_core/src/highway @@ -1 +1 @@ -Subproject commit a97b5d371d696564e206627a883b1341c65bd983 +Subproject commit 0b696633f9ad89497dd5532b55eaa01625ad71ca diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index 5d0d91f1e996..fc73a64b19a0 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -1877,7 +1877,9 @@ get_fpu_mode(PyObject *NPY_UNUSED(self), PyObject *args) result = _controlfp(0, 0); return PyLong_FromLongLong(result); } -#elif (defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))) || (defined(_MSC_VER) && defined(__clang__)) +#elif (defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))) \ + || (defined(_MSC_VER) && defined(__clang__) && \ + (defined(_M_IX86) || defined(_M_AMD64))) { unsigned short cw = 0; __asm__("fstcw %w0" : "=m" (cw)); diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c index 236ed11e058d..8236ec5c65ae 100644 --- a/numpy/_core/src/multiarray/common.c +++ b/numpy/_core/src/multiarray/common.c @@ -62,7 +62,7 @@ NPY_NO_EXPORT PyArray_Descr * 
PyArray_DTypeFromObjectStringDiscovery( PyObject *obj, PyArray_Descr *last_dtype, int string_type) { - int itemsize; + npy_intp itemsize; if (string_type == NPY_STRING) { PyObject *temp = PyObject_Str(obj); @@ -75,6 +75,12 @@ PyArray_DTypeFromObjectStringDiscovery( if (itemsize < 0) { return NULL; } + if (itemsize > NPY_MAX_INT) { + /* We can allow this, but should audit code paths before we do. */ + PyErr_Format(PyExc_TypeError, + "string of length %zd is too large to store inside array.", itemsize); + return NULL; + } } else if (string_type == NPY_UNICODE) { PyObject *temp = PyObject_Str(obj); @@ -86,6 +92,11 @@ PyArray_DTypeFromObjectStringDiscovery( if (itemsize < 0) { return NULL; } + if (itemsize > NPY_MAX_INT / 4) { + PyErr_Format(PyExc_TypeError, + "string of length %zd is too large to store inside array.", itemsize); + return NULL; + } itemsize *= 4; /* convert UCS4 codepoints to bytes */ } else { diff --git a/numpy/_core/src/multiarray/common.h b/numpy/_core/src/multiarray/common.h index 6086f4d2c554..f4ba10d42e18 100644 --- a/numpy/_core/src/multiarray/common.h +++ b/numpy/_core/src/multiarray/common.h @@ -12,6 +12,10 @@ #include "npy_import.h" #include +#ifdef __cplusplus +extern "C" { +#endif + #define error_converting(x) (((x) == -1) && PyErr_Occurred()) #ifdef NPY_ALLOW_THREADS @@ -104,13 +108,13 @@ check_and_adjust_index(npy_intp *index, npy_intp max_item, int axis, /* Try to be as clear as possible about what went wrong. 
*/ if (axis >= 0) { PyErr_Format(PyExc_IndexError, - "index %"NPY_INTP_FMT" is out of bounds " - "for axis %d with size %"NPY_INTP_FMT, + "index %" NPY_INTP_FMT" is out of bounds " + "for axis %d with size %" NPY_INTP_FMT, *index, axis, max_item); } else { PyErr_Format(PyExc_IndexError, - "index %"NPY_INTP_FMT" is out of bounds " - "for size %"NPY_INTP_FMT, *index, max_item); + "index %" NPY_INTP_FMT " is out of bounds " + "for size %" NPY_INTP_FMT, *index, max_item); } return -1; } @@ -163,7 +167,9 @@ check_and_adjust_axis(int *axis, int ndim) * . * clang versions < 8.0.0 have the same bug. */ -#if (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112 \ +#ifdef __cplusplus +#define NPY_ALIGNOF(type) alignof(type) +#elif (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112 \ || (defined __GNUC__ && __GNUC__ < 4 + (__GNUC_MINOR__ < 9) \ && !defined __clang__) \ || (defined __clang__ && __clang_major__ < 8)) @@ -347,4 +353,8 @@ new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out, */ #define NPY_ITER_REDUCTION_AXIS(axis) (axis + (1 << (NPY_BITSOF_INT - 2))) +#ifdef __cplusplus +} +#endif + #endif /* NUMPY_CORE_SRC_MULTIARRAY_COMMON_H_ */ diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index 48524aff4dac..fca733597a2d 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -150,8 +150,12 @@ arr_bincount(PyObject *NPY_UNUSED(self), PyObject *const *args, } if (PyArray_SIZE(tmp1) > 0) { /* The input is not empty, so convert it to NPY_INTP. 
*/ - lst = (PyArrayObject *)PyArray_ContiguousFromAny((PyObject *)tmp1, - NPY_INTP, 1, 1); + int flags = NPY_ARRAY_WRITEABLE | NPY_ARRAY_ALIGNED | NPY_ARRAY_C_CONTIGUOUS; + if (PyArray_ISINTEGER(tmp1)) { + flags = flags | NPY_ARRAY_FORCECAST; + } + PyArray_Descr* local_dtype = PyArray_DescrFromType(NPY_INTP); + lst = (PyArrayObject *)PyArray_FromAny((PyObject *)tmp1, local_dtype, 1, 1, flags, NULL); Py_DECREF(tmp1); if (lst == NULL) { /* Failed converting to NPY_INTP. */ @@ -177,7 +181,13 @@ arr_bincount(PyObject *NPY_UNUSED(self), PyObject *const *args, } if (lst == NULL) { - lst = (PyArrayObject *)PyArray_ContiguousFromAny(list, NPY_INTP, 1, 1); + int flags = NPY_ARRAY_WRITEABLE | NPY_ARRAY_ALIGNED | NPY_ARRAY_C_CONTIGUOUS; + if (PyArray_Check((PyObject *)list) && + PyArray_ISINTEGER((PyArrayObject *)list)) { + flags = flags | NPY_ARRAY_FORCECAST; + } + PyArray_Descr* local_dtype = PyArray_DescrFromType(NPY_INTP); + lst = (PyArrayObject *)PyArray_FromAny(list, local_dtype, 1, 1, flags, NULL); if (lst == NULL) { goto fail; } diff --git a/numpy/_core/src/multiarray/compiled_base.h b/numpy/_core/src/multiarray/compiled_base.h index e0e73ac798bf..b8081c8d3a55 100644 --- a/numpy/_core/src/multiarray/compiled_base.h +++ b/numpy/_core/src/multiarray/compiled_base.h @@ -10,9 +10,9 @@ arr_bincount(PyObject *, PyObject *const *, Py_ssize_t, PyObject *); NPY_NO_EXPORT PyObject * arr__monotonicity(PyObject *, PyObject *, PyObject *kwds); NPY_NO_EXPORT PyObject * -arr_interp(PyObject *, PyObject *const *, Py_ssize_t, PyObject *, PyObject *); +arr_interp(PyObject *, PyObject *const *, Py_ssize_t, PyObject *); NPY_NO_EXPORT PyObject * -arr_interp_complex(PyObject *, PyObject *const *, Py_ssize_t, PyObject *, PyObject *); +arr_interp_complex(PyObject *, PyObject *const *, Py_ssize_t, PyObject *); NPY_NO_EXPORT PyObject * arr_ravel_multi_index(PyObject *, PyObject *, PyObject *); NPY_NO_EXPORT PyObject * diff --git a/numpy/_core/src/multiarray/convert_datatype.c 
b/numpy/_core/src/multiarray/convert_datatype.c index 67f0a4d509fa..158c9ed207b5 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -62,46 +62,24 @@ static PyObject * PyArray_GetObjectToGenericCastingImpl(void); -/** - * Fetch the casting implementation from one DType to another. - * - * @param from The implementation to cast from - * @param to The implementation to cast to - * - * @returns A castingimpl (PyArrayDTypeMethod *), None or NULL with an - * error set. - */ -NPY_NO_EXPORT PyObject * -PyArray_GetCastingImpl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) +static PyObject * +create_casting_impl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) { - PyObject *res; - if (from == to) { - res = (PyObject *)NPY_DT_SLOTS(from)->within_dtype_castingimpl; - } - else { - res = PyDict_GetItemWithError(NPY_DT_SLOTS(from)->castingimpls, (PyObject *)to); - } - if (res != NULL || PyErr_Occurred()) { - Py_XINCREF(res); - return res; - } /* - * The following code looks up CastingImpl based on the fact that anything + * Look up CastingImpl based on the fact that anything * can be cast to and from objects or structured (void) dtypes. - * - * The last part adds casts dynamically based on legacy definition */ if (from->type_num == NPY_OBJECT) { - res = PyArray_GetObjectToGenericCastingImpl(); + return PyArray_GetObjectToGenericCastingImpl(); } else if (to->type_num == NPY_OBJECT) { - res = PyArray_GetGenericToObjectCastingImpl(); + return PyArray_GetGenericToObjectCastingImpl(); } else if (from->type_num == NPY_VOID) { - res = PyArray_GetVoidToGenericCastingImpl(); + return PyArray_GetVoidToGenericCastingImpl(); } else if (to->type_num == NPY_VOID) { - res = PyArray_GetGenericToVoidCastingImpl(); + return PyArray_GetGenericToVoidCastingImpl(); } /* * Reject non-legacy dtypes. 
They need to use the new API to add casts and @@ -125,42 +103,105 @@ PyArray_GetCastingImpl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) from->singleton, to->type_num); if (castfunc == NULL) { PyErr_Clear(); - /* Remember that this cast is not possible */ - if (PyDict_SetItem(NPY_DT_SLOTS(from)->castingimpls, - (PyObject *) to, Py_None) < 0) { - return NULL; - } Py_RETURN_NONE; } } - - /* PyArray_AddLegacyWrapping_CastingImpl find the correct casting level: */ - /* - * TODO: Possibly move this to the cast registration time. But if we do - * that, we have to also update the cast when the casting safety - * is registered. + /* Create a cast using the state of the legacy casting setup defined + * during the setup of the DType. + * + * Ideally we would do this when we create the DType, but legacy user + * DTypes don't have a way to signal that a DType is done setting up + * casts. Without such a mechanism, the safest way to know that a + * DType is done setting up is to register the cast lazily the first + * time a user does the cast. + * + * We *could* register the casts when we create the wrapping + * DTypeMeta, but that means the internals of the legacy user DType + * system would need to update the state of the casting safety flags + * in the cast implementations stored on the DTypeMeta. That's an + * inversion of abstractions and would be tricky to do without + * creating circular dependencies inside NumPy. */ if (PyArray_AddLegacyWrapping_CastingImpl(from, to, -1) < 0) { return NULL; } + /* castingimpls is unconditionally filled by + * AddLegacyWrapping_CastingImpl, so this won't create a recursive + * critical section + */ return PyArray_GetCastingImpl(from, to); } +} - if (res == NULL) { +static PyObject * +ensure_castingimpl_exists(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) +{ + int return_error = 0; + PyObject *res = NULL; + + /* Need to create the cast. 
This might happen at runtime so we enter a + critical section to avoid races */ + + Py_BEGIN_CRITICAL_SECTION(NPY_DT_SLOTS(from)->castingimpls); + + /* check if another thread filled it while this thread was blocked on + acquiring the critical section */ + if (PyDict_GetItemRef(NPY_DT_SLOTS(from)->castingimpls, (PyObject *)to, + &res) < 0) { + return_error = 1; + } + else if (res == NULL) { + res = create_casting_impl(from, to); + if (res == NULL) { + return_error = 1; + } + else if (PyDict_SetItem(NPY_DT_SLOTS(from)->castingimpls, + (PyObject *)to, res) < 0) { + return_error = 1; + } + } + Py_END_CRITICAL_SECTION(); + if (return_error) { + Py_XDECREF(res); return NULL; } - if (from == to) { + if (from == to && res == Py_None) { PyErr_Format(PyExc_RuntimeError, "Internal NumPy error, within-DType cast missing for %S!", from); Py_DECREF(res); return NULL; } - if (PyDict_SetItem(NPY_DT_SLOTS(from)->castingimpls, - (PyObject *)to, res) < 0) { - Py_DECREF(res); + return res; +} + +/** + * Fetch the casting implementation from one DType to another. + * + * @param from The implementation to cast from + * @param to The implementation to cast to + * + * @returns A castingimpl (PyArrayDTypeMethod *), None or NULL with an + * error set. + */ +NPY_NO_EXPORT PyObject * +PyArray_GetCastingImpl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) +{ + PyObject *res = NULL; + if (from == to) { + if ((NPY_DT_SLOTS(from)->within_dtype_castingimpl) != NULL) { + res = Py_XNewRef( + (PyObject *)NPY_DT_SLOTS(from)->within_dtype_castingimpl); + } + } + else if (PyDict_GetItemRef(NPY_DT_SLOTS(from)->castingimpls, + (PyObject *)to, &res) < 0) { return NULL; } - return res; + if (res != NULL) { + return res; + } + + return ensure_castingimpl_exists(from, to); } @@ -409,7 +450,7 @@ _get_cast_safety_from_castingimpl(PyArrayMethodObject *castingimpl, * implementations fully to have them available for doing the actual cast * later. 
* - * @param from The descriptor to cast from + * @param from The descriptor to cast from * @param to The descriptor to cast to (may be NULL) * @param to_dtype If `to` is NULL, must pass the to_dtype (otherwise this * is ignored). @@ -2031,6 +2072,11 @@ PyArray_AddCastingImplementation(PyBoundArrayMethodObject *meth) /** * Add a new casting implementation using a PyArrayMethod_Spec. * + * Using this function outside of module initialization without holding a + * critical section on the castingimpls dict may lead to a race to fill the + * dict. Use PyArray_GetGastingImpl to lazily register casts at runtime + * safely. + * * @param spec The specification to use as a source * @param private If private, allow slots not publicly exposed. * @return 0 on success -1 on failure @@ -2403,6 +2449,11 @@ cast_to_string_resolve_descriptors( return -1; } if (dtypes[1]->type_num == NPY_UNICODE) { + if (size > NPY_MAX_INT / 4) { + PyErr_Format(PyExc_TypeError, + "string of length %zd is too large to store inside array.", size); + return -1; + } size *= 4; } @@ -3466,7 +3517,9 @@ initialize_void_and_object_globals(void) { method->nin = 1; method->nout = 1; method->name = "object_to_any_cast"; - method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; + method->flags = (NPY_METH_SUPPORTS_UNALIGNED + | NPY_METH_REQUIRES_PYAPI + | NPY_METH_NO_FLOATINGPOINT_ERRORS); method->casting = NPY_UNSAFE_CASTING; method->resolve_descriptors = &object_to_any_resolve_descriptors; method->get_strided_loop = &object_to_any_get_loop; @@ -3481,7 +3534,9 @@ initialize_void_and_object_globals(void) { method->nin = 1; method->nout = 1; method->name = "any_to_object_cast"; - method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; + method->flags = (NPY_METH_SUPPORTS_UNALIGNED + | NPY_METH_REQUIRES_PYAPI + | NPY_METH_NO_FLOATINGPOINT_ERRORS); method->casting = NPY_SAFE_CASTING; method->resolve_descriptors = &any_to_object_resolve_descriptors; method->get_strided_loop = 
&any_to_object_get_loop; diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index c9f9ac3941a9..f4f66142101c 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -1821,32 +1821,30 @@ PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, * Internal version of PyArray_CheckFromAny that accepts a dtypemeta. Borrows * references to the descriptor and dtype. */ - NPY_NO_EXPORT PyObject * PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr, PyArray_DTypeMeta *in_DType, int min_depth, int max_depth, int requires, PyObject *context) { PyObject *obj; + Py_XINCREF(in_descr); /* take ownership as we may replace it */ if (requires & NPY_ARRAY_NOTSWAPPED) { - if (!in_descr && PyArray_Check(op) && - PyArray_ISBYTESWAPPED((PyArrayObject* )op)) { - in_descr = PyArray_DescrNew(PyArray_DESCR((PyArrayObject *)op)); + if (!in_descr && PyArray_Check(op)) { + in_descr = PyArray_DESCR((PyArrayObject *)op); + Py_INCREF(in_descr); + } + if (in_descr) { + PyArray_DESCR_REPLACE_CANONICAL(in_descr); if (in_descr == NULL) { return NULL; } } - else if (in_descr && !PyArray_ISNBO(in_descr->byteorder)) { - PyArray_DESCR_REPLACE(in_descr); - } - if (in_descr && in_descr->byteorder != NPY_IGNORE) { - in_descr->byteorder = NPY_NATIVE; - } } int was_scalar; obj = PyArray_FromAny_int(op, in_descr, in_DType, min_depth, max_depth, requires, context, &was_scalar); + Py_XDECREF(in_descr); if (obj == NULL) { return NULL; } @@ -2155,7 +2153,7 @@ PyArray_FromInterface(PyObject *origin) PyArray_Descr *dtype = NULL; char *data = NULL; Py_buffer view; - int i, n; + Py_ssize_t i, n; npy_intp dims[NPY_MAXDIMS], strides[NPY_MAXDIMS]; int dataflags = NPY_ARRAY_BEHAVED; @@ -2271,6 +2269,12 @@ PyArray_FromInterface(PyObject *origin) /* Get dimensions from shape tuple */ else { n = PyTuple_GET_SIZE(attr); + if (n > NPY_MAXDIMS) { + PyErr_Format(PyExc_ValueError, + "number of dimensions must be within [0, %d], got %d", 
+ NPY_MAXDIMS, n); + goto fail; + } for (i = 0; i < n; i++) { PyObject *tmp = PyTuple_GET_ITEM(attr, i); dims[i] = PyArray_PyIntAsIntp(tmp); diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 3ed3c36d4bba..006a5504f728 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -274,8 +274,16 @@ _convert_from_tuple(PyObject *obj, int align) if (PyDataType_ISUNSIZED(type)) { /* interpret next item as a typesize */ int itemsize = PyArray_PyIntAsInt(PyTuple_GET_ITEM(obj,1)); - - if (error_converting(itemsize)) { + if (type->type_num == NPY_UNICODE) { + if (itemsize > NPY_MAX_INT / 4) { + itemsize = -1; + } + else { + itemsize *= 4; + } + } + if (itemsize < 0) { + /* Error may or may not be set by PyIntAsInt. */ PyErr_SetString(PyExc_ValueError, "invalid itemsize in generic type tuple"); Py_DECREF(type); @@ -285,12 +293,8 @@ _convert_from_tuple(PyObject *obj, int align) if (type == NULL) { return NULL; } - if (type->type_num == NPY_UNICODE) { - type->elsize = itemsize << 2; - } - else { - type->elsize = itemsize; - } + + type->elsize = itemsize; return type; } else if (type->metadata && (PyDict_Check(val) || PyDictProxy_Check(val))) { @@ -1861,7 +1865,10 @@ _convert_from_str(PyObject *obj, int align) */ case NPY_UNICODELTR: check_num = NPY_UNICODE; - elsize <<= 2; + if (elsize > (NPY_MAX_INT / 4)) { + goto fail; + } + elsize *= 4; break; case NPY_VOIDLTR: diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c index 14fbc36c3bff..ac37a04c30c6 100644 --- a/numpy/_core/src/multiarray/dlpack.c +++ b/numpy/_core/src/multiarray/dlpack.c @@ -504,36 +504,12 @@ from_dlpack(PyObject *NPY_UNUSED(self), return NULL; } - /* Prepare the arguments to call objects __dlpack__() method */ - static PyObject *call_kwnames = NULL; - static PyObject *dl_cpu_device_tuple = NULL; - static PyObject *max_version = NULL; - - if (call_kwnames == NULL) { - call_kwnames = 
Py_BuildValue("(sss)", "dl_device", "copy", "max_version"); - if (call_kwnames == NULL) { - return NULL; - } - } - if (dl_cpu_device_tuple == NULL) { - dl_cpu_device_tuple = Py_BuildValue("(i,i)", 1, 0); - if (dl_cpu_device_tuple == NULL) { - return NULL; - } - } - if (max_version == NULL) { - max_version = Py_BuildValue("(i,i)", 1, 0); - if (max_version == NULL) { - return NULL; - } - } - /* * Prepare arguments for the full call. We always forward copy and pass * our max_version. `device` is always passed as `None`, but if the user * provided a device, we will replace it with the "cpu": (1, 0). */ - PyObject *call_args[] = {obj, Py_None, copy, max_version}; + PyObject *call_args[] = {obj, Py_None, copy, npy_static_pydata.dl_max_version}; Py_ssize_t nargsf = 1 | PY_VECTORCALL_ARGUMENTS_OFFSET; /* If device is passed it must be "cpu" and replace it with (1, 0) */ @@ -544,12 +520,13 @@ from_dlpack(PyObject *NPY_UNUSED(self), return NULL; } assert(device_request == NPY_DEVICE_CPU); - call_args[1] = dl_cpu_device_tuple; + call_args[1] = npy_static_pydata.dl_cpu_device_tuple; } PyObject *capsule = PyObject_VectorcallMethod( - npy_interned_str.__dlpack__, call_args, nargsf, call_kwnames); + npy_interned_str.__dlpack__, call_args, nargsf, + npy_static_pydata.dl_call_kwnames); if (capsule == NULL) { /* * TODO: This path should be deprecated in NumPy 2.1. Once deprecated @@ -601,7 +578,7 @@ from_dlpack(PyObject *NPY_UNUSED(self), return NULL; } dl_tensor = managed->dl_tensor; - readonly = 0; + readonly = 1; } const int ndim = dl_tensor.ndim; @@ -702,14 +679,13 @@ from_dlpack(PyObject *NPY_UNUSED(self), } PyObject *ret = PyArray_NewFromDescr(&PyArray_Type, descr, ndim, shape, - dl_tensor.strides != NULL ? strides : NULL, data, 0, NULL); + dl_tensor.strides != NULL ? strides : NULL, data, readonly ? 
0 : + NPY_ARRAY_WRITEABLE, NULL); + if (ret == NULL) { Py_DECREF(capsule); return NULL; } - if (readonly) { - PyArray_CLEARFLAGS((PyArrayObject *)ret, NPY_ARRAY_WRITEABLE); - } PyObject *new_capsule; if (versioned) { diff --git a/numpy/_core/src/multiarray/dragon4.c b/numpy/_core/src/multiarray/dragon4.c index 7cd8afbed6d8..b936f4dc213e 100644 --- a/numpy/_core/src/multiarray/dragon4.c +++ b/numpy/_core/src/multiarray/dragon4.c @@ -1615,7 +1615,8 @@ typedef struct Dragon4_Options { * * See Dragon4_Options for description of remaining arguments. */ -static npy_uint32 + +static npy_int32 FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, npy_int32 exponent, char signbit, npy_uint32 mantissaBit, npy_bool hasUnequalMargins, DigitMode digit_mode, @@ -1646,7 +1647,7 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, buffer[pos++] = '-'; has_sign = 1; } - + numDigits = Dragon4(mantissa, exponent, mantissaBit, hasUnequalMargins, digit_mode, cutoff_mode, precision, min_digits, buffer + has_sign, maxPrintLen - has_sign, @@ -1658,14 +1659,14 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, /* if output has a whole number */ if (printExponent >= 0) { /* leave the whole number at the start of the buffer */ - numWholeDigits = printExponent+1; + numWholeDigits = printExponent+1; if (numDigits <= numWholeDigits) { npy_int32 count = numWholeDigits - numDigits; pos += numDigits; - /* don't overflow the buffer */ - if (pos + count > maxPrintLen) { - count = maxPrintLen - pos; + if (count > maxPrintLen - pos) { + PyErr_SetString(PyExc_RuntimeError, "Float formating result too large"); + return -1; } /* add trailing zeros up to the decimal point */ @@ -1767,9 +1768,12 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, pos < maxPrintLen) { /* add trailing zeros up to add_digits length */ /* compute the number of trailing zeros needed */ + npy_int32 count = desiredFractionalDigits - 
numFractionDigits; - if (pos + count > maxPrintLen) { - count = maxPrintLen - pos; + + if (count > maxPrintLen - pos) { + PyErr_SetString(PyExc_RuntimeError, "Float formating result too large"); + return -1; } numFractionDigits += count; @@ -1802,7 +1806,7 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, } /* add any whitespace padding to right side */ - if (digits_right >= numFractionDigits) { + if (digits_right >= numFractionDigits) { npy_int32 count = digits_right - numFractionDigits; /* in trim_mode DptZeros, if right padding, add a space for the . */ @@ -1811,8 +1815,9 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, buffer[pos++] = ' '; } - if (pos + count > maxPrintLen) { - count = maxPrintLen - pos; + if (count > maxPrintLen - pos) { + PyErr_SetString(PyExc_RuntimeError, "Float formating result too large"); + return -1; } for ( ; count > 0; count--) { @@ -1823,14 +1828,16 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, if (digits_left > numWholeDigits + has_sign) { npy_int32 shift = digits_left - (numWholeDigits + has_sign); npy_int32 count = pos; - - if (count + shift > maxPrintLen) { - count = maxPrintLen - shift; + + if (count > maxPrintLen - shift) { + PyErr_SetString(PyExc_RuntimeError, "Float formating result too large"); + return -1; } if (count > 0) { memmove(buffer + shift, buffer, count); } + pos = shift + count; for ( ; shift > 0; shift--) { buffer[shift - 1] = ' '; @@ -1860,7 +1867,7 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, * * See Dragon4_Options for description of remaining arguments. 
*/ -static npy_uint32 +static npy_int32 FormatScientific (char *buffer, npy_uint32 bufferSize, BigInt *mantissa, npy_int32 exponent, char signbit, npy_uint32 mantissaBit, npy_bool hasUnequalMargins, DigitMode digit_mode, @@ -2158,7 +2165,7 @@ PrintInfNan(char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa, * Helper function that takes Dragon4 parameters and options and * calls Dragon4. */ -static npy_uint32 +static npy_int32 Format_floatbits(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, npy_int32 exponent, char signbit, npy_uint32 mantissaBit, npy_bool hasUnequalMargins, Dragon4_Options *opt) @@ -2187,7 +2194,7 @@ Format_floatbits(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, * exponent: 5 bits * mantissa: 10 bits */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IEEE_binary16( npy_half *value, Dragon4_Options *opt) { @@ -2274,7 +2281,7 @@ Dragon4_PrintFloat_IEEE_binary16( * exponent: 8 bits * mantissa: 23 bits */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IEEE_binary32( npy_float32 *value, Dragon4_Options *opt) @@ -2367,7 +2374,7 @@ Dragon4_PrintFloat_IEEE_binary32( * exponent: 11 bits * mantissa: 52 bits */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IEEE_binary64( npy_float64 *value, Dragon4_Options *opt) { @@ -2482,7 +2489,7 @@ typedef struct FloatVal128 { * intbit 1 bit, first u64 * mantissa: 63 bits, first u64 */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_Intel_extended( FloatVal128 value, Dragon4_Options *opt) { @@ -2580,7 +2587,7 @@ Dragon4_PrintFloat_Intel_extended( * system. But numpy defines NPY_FLOAT80, so if we come across it, assume it is * an Intel extended format. 
*/ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_Intel_extended80( npy_float80 *value, Dragon4_Options *opt) { @@ -2604,7 +2611,7 @@ Dragon4_PrintFloat_Intel_extended80( #ifdef HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE /* Intel's 80-bit IEEE extended precision format, 96-bit storage */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_Intel_extended96( npy_float96 *value, Dragon4_Options *opt) { @@ -2628,7 +2635,7 @@ Dragon4_PrintFloat_Intel_extended96( #ifdef HAVE_LDOUBLE_MOTOROLA_EXTENDED_12_BYTES_BE /* Motorola Big-endian equivalent of the Intel-extended 96 fp format */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_Motorola_extended96( npy_float96 *value, Dragon4_Options *opt) { @@ -2665,7 +2672,7 @@ typedef union FloatUnion128 #ifdef HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE /* Intel's 80-bit IEEE extended precision format, 128-bit storage */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_Intel_extended128( npy_float128 *value, Dragon4_Options *opt) { @@ -2694,7 +2701,7 @@ Dragon4_PrintFloat_Intel_extended128( * I am not sure if the arch also supports uint128, and C does not seem to * support int128 literals. So we use uint64 to do manipulation. */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IEEE_binary128( FloatVal128 val128, Dragon4_Options *opt) { @@ -2779,7 +2786,7 @@ Dragon4_PrintFloat_IEEE_binary128( } #if defined(HAVE_LDOUBLE_IEEE_QUAD_LE) -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IEEE_binary128_le( npy_float128 *value, Dragon4_Options *opt) { @@ -2799,7 +2806,7 @@ Dragon4_PrintFloat_IEEE_binary128_le( * This function is untested, very few, if any, architectures implement * big endian IEEE binary128 floating point. 
*/ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IEEE_binary128_be( npy_float128 *value, Dragon4_Options *opt) { @@ -2854,7 +2861,7 @@ Dragon4_PrintFloat_IEEE_binary128_be( * https://gcc.gnu.org/wiki/Ieee128PowerPCA * https://www.ibm.com/support/knowledgecenter/en/ssw_aix_71/com.ibm.aix.genprogc/128bit_long_double_floating-point_datatype.htm */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IBM_double_double( npy_float128 *value, Dragon4_Options *opt) { @@ -3041,6 +3048,7 @@ Dragon4_PrintFloat_IBM_double_double( * which goes up to about 10^4932. The Dragon4_scratch struct provides a string * buffer of this size. */ + #define make_dragon4_typefuncs_inner(Type, npy_type, format) \ \ PyObject *\ diff --git a/numpy/_core/src/multiarray/dtype_transfer.c b/numpy/_core/src/multiarray/dtype_transfer.c index d7a5e80800b6..188a55a4b5f5 100644 --- a/numpy/_core/src/multiarray/dtype_transfer.c +++ b/numpy/_core/src/multiarray/dtype_transfer.c @@ -235,8 +235,8 @@ any_to_object_get_loop( NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - - *flags = NPY_METH_REQUIRES_PYAPI; /* No need for floating point errors */ + /* Python API doesn't use FPEs and this also attempts to hide spurious ones. */ + *flags = NPY_METH_REQUIRES_PYAPI | NPY_METH_NO_FLOATINGPOINT_ERRORS; *out_loop = _strided_to_strided_any_to_object; *out_transferdata = PyMem_Malloc(sizeof(_any_to_object_auxdata)); @@ -342,7 +342,8 @@ object_to_any_get_loop( NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - *flags = NPY_METH_REQUIRES_PYAPI; + /* Python API doesn't use FPEs and this also attempts to hide spurious ones. 
*/ + *flags = NPY_METH_REQUIRES_PYAPI | NPY_METH_NO_FLOATINGPOINT_ERRORS; /* NOTE: auxdata is only really necessary to flag `move_references` */ _object_to_any_auxdata *data = PyMem_Malloc(sizeof(*data)); diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 8d75f991f112..0b1b0fb39192 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -494,12 +494,14 @@ string_discover_descr_from_pyobject( itemsize = PyUnicode_GetLength(obj); } if (itemsize != -1) { - if (cls->type_num == NPY_UNICODE) { - itemsize *= 4; - } - if (itemsize > NPY_MAX_INT) { + if (itemsize > NPY_MAX_INT || ( + cls->type_num == NPY_UNICODE && itemsize > NPY_MAX_INT / 4)) { PyErr_SetString(PyExc_TypeError, "string too large to store inside array."); + return NULL; + } + if (cls->type_num == NPY_UNICODE) { + itemsize *= 4; } PyArray_Descr *res = PyArray_DescrNewFromType(cls->type_num); if (res == NULL) { @@ -1250,6 +1252,12 @@ dtypemeta_wrap_legacy_descriptor( return -1; } } + else { + // ensure the within dtype cast is populated for legacy user dtypes + if (PyArray_GetCastingImpl(dtype_class, dtype_class) == NULL) { + return -1; + } + } return 0; } diff --git a/numpy/_core/src/multiarray/dtypemeta.h b/numpy/_core/src/multiarray/dtypemeta.h index d1b0b13b4bca..8b3abbeb1883 100644 --- a/numpy/_core/src/multiarray/dtypemeta.h +++ b/numpy/_core/src/multiarray/dtypemeta.h @@ -285,6 +285,11 @@ PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) v, itemptr, arr); } +// Like PyArray_DESCR_REPLACE, but calls ensure_canonical instead of DescrNew +#define PyArray_DESCR_REPLACE_CANONICAL(descr) do { \ + PyArray_Descr *_new_ = NPY_DT_CALL_ensure_canonical(descr); \ + Py_XSETREF(descr, _new_); \ + } while(0) #endif /* NUMPY_CORE_SRC_MULTIARRAY_DTYPEMETA_H_ */ diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index f3ce35f3092f..4549f107d76e 100644 --- 
a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -922,16 +922,23 @@ PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) } } + /* Fill in dimensions of new array */ + npy_intp dims[NPY_MAXDIMS] = {0}; + + for (int i = 0; i < PyArray_NDIM(aop); i++) { + dims[i] = PyArray_DIMS(aop)[i]; + } + + dims[axis] = total; + /* Construct new array */ - PyArray_DIMS(aop)[axis] = total; Py_INCREF(PyArray_DESCR(aop)); ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(aop), PyArray_DESCR(aop), PyArray_NDIM(aop), - PyArray_DIMS(aop), + dims, NULL, NULL, 0, (PyObject *)aop); - PyArray_DIMS(aop)[axis] = n; if (ret == NULL) { goto fail; } @@ -2014,8 +2021,7 @@ PyArray_LexSort(PyObject *sort_keys, int axis) } rcode = argsort(its[j]->dataptr, (npy_intp *)rit->dataptr, N, mps[j]); - if (rcode < 0 || (PyDataType_REFCHK(PyArray_DESCR(mps[j])) - && PyErr_Occurred())) { + if (rcode < 0 || (object && PyErr_Occurred())) { goto fail; } PyArray_ITER_NEXT(its[j]); @@ -2105,7 +2111,6 @@ PyArray_SearchSorted(PyArrayObject *op1, PyObject *op2, if (dtype == NULL) { return NULL; } - /* refs to dtype we own = 1 */ /* Look for binary search function */ if (perm) { @@ -2116,26 +2121,23 @@ PyArray_SearchSorted(PyArrayObject *op1, PyObject *op2, } if (binsearch == NULL && argbinsearch == NULL) { PyErr_SetString(PyExc_TypeError, "compare not supported for type"); - /* refs to dtype we own = 1 */ Py_DECREF(dtype); - /* refs to dtype we own = 0 */ return NULL; } - /* need ap2 as contiguous array and of right type */ - /* refs to dtype we own = 1 */ - Py_INCREF(dtype); - /* refs to dtype we own = 2 */ + /* need ap2 as contiguous array and of right dtype (note: steals dtype reference) */ ap2 = (PyArrayObject *)PyArray_CheckFromAny(op2, dtype, 0, 0, NPY_ARRAY_CARRAY_RO | NPY_ARRAY_NOTSWAPPED, NULL); - /* refs to dtype we own = 1, array creation steals one even on failure */ if (ap2 == NULL) { - Py_DECREF(dtype); - /* refs to dtype we own = 0 */ return 
NULL; } + /* + * The dtype reference we had was used for creating ap2, which may have + * replaced it with another. So here we copy the dtype of ap2 and use it for `ap1`. + */ + dtype = (PyArray_Descr *)Py_NewRef(PyArray_DESCR(ap2)); /* * If the needle (ap2) is larger than the haystack (op1) we copy the @@ -2144,9 +2146,9 @@ PyArray_SearchSorted(PyArrayObject *op1, PyObject *op2, if (PyArray_SIZE(ap2) > PyArray_SIZE(op1)) { ap1_flags |= NPY_ARRAY_CARRAY_RO; } + /* dtype is stolen, after this we have no reference */ ap1 = (PyArrayObject *)PyArray_CheckFromAny((PyObject *)op1, dtype, 1, 1, ap1_flags, NULL); - /* refs to dtype we own = 0, array creation steals one even on failure */ if (ap1 == NULL) { goto fail; } @@ -2887,10 +2889,11 @@ PyArray_Nonzero(PyArrayObject *self) * the fast bool count is followed by this sparse path is faster * than combining the two loops, even for larger arrays */ + npy_intp * multi_index_end = multi_index + nonzero_count; if (((double)nonzero_count / count) <= 0.1) { npy_intp subsize; npy_intp j = 0; - while (1) { + while (multi_index < multi_index_end) { npy_memchr(data + j * stride, 0, stride, count - j, &subsize, 1); j += subsize; @@ -2905,11 +2908,10 @@ PyArray_Nonzero(PyArrayObject *self) * stalls that are very expensive on most modern processors. 
*/ else { - npy_intp *multi_index_end = multi_index + nonzero_count; npy_intp j = 0; /* Manually unroll for GCC and maybe other compilers */ - while (multi_index + 4 < multi_index_end) { + while (multi_index + 4 < multi_index_end && (j < count - 4) ) { *multi_index = j; multi_index += data[0] != 0; *multi_index = j + 1; @@ -2922,7 +2924,7 @@ PyArray_Nonzero(PyArrayObject *self) j += 4; } - while (multi_index < multi_index_end) { + while (multi_index < multi_index_end && (j < count) ) { *multi_index = j; multi_index += *data != 0; data += stride; diff --git a/numpy/_core/src/multiarray/iterators.c b/numpy/_core/src/multiarray/iterators.c index 2806670d3e07..c3b6500f69d0 100644 --- a/numpy/_core/src/multiarray/iterators.c +++ b/numpy/_core/src/multiarray/iterators.c @@ -136,7 +136,6 @@ PyArray_RawIterBaseInit(PyArrayIterObject *it, PyArrayObject *ao) nd = PyArray_NDIM(ao); /* The legacy iterator only supports 32 dimensions */ assert(nd <= NPY_MAXDIMS_LEGACY_ITERS); - PyArray_UpdateFlags(ao, NPY_ARRAY_C_CONTIGUOUS); if (PyArray_ISCONTIGUOUS(ao)) { it->contiguous = 1; } diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 7f5bd29809a3..926efa54900f 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -1605,7 +1605,7 @@ _deepcopy_call(char *iptr, char *optr, PyArray_Descr *dtype, } } } - else { + else if (PyDataType_ISOBJECT(dtype)) { PyObject *itemp, *otemp; PyObject *res; memcpy(&itemp, iptr, sizeof(itemp)); diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index c9d46d859f60..2d28a7a9ef4b 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -1215,6 +1215,7 @@ _pyarray_correlate(PyArrayObject *ap1, PyArrayObject *ap2, int typenum, goto clean_ret; } + int needs_pyapi = PyDataType_FLAGCHK(PyArray_DESCR(ret), NPY_NEEDS_PYAPI); NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(ret)); 
is1 = PyArray_STRIDES(ap1)[0]; is2 = PyArray_STRIDES(ap2)[0]; @@ -1225,6 +1226,9 @@ _pyarray_correlate(PyArrayObject *ap1, PyArrayObject *ap2, int typenum, n = n - n_left; for (i = 0; i < n_left; i++) { dot(ip1, is1, ip2, is2, op, n, ret); + if (needs_pyapi && PyErr_Occurred()) { + goto done; + } n++; ip2 -= is2; op += os; @@ -1236,19 +1240,21 @@ _pyarray_correlate(PyArrayObject *ap1, PyArrayObject *ap2, int typenum, op += os * (n1 - n2 + 1); } else { - for (i = 0; i < (n1 - n2 + 1); i++) { + for (i = 0; i < (n1 - n2 + 1) && (!needs_pyapi || !PyErr_Occurred()); + i++) { dot(ip1, is1, ip2, is2, op, n, ret); ip1 += is1; op += os; } } - for (i = 0; i < n_right; i++) { + for (i = 0; i < n_right && (!needs_pyapi || !PyErr_Occurred()); i++) { n--; dot(ip1, is1, ip2, is2, op, n, ret); ip1 += is1; op += os; } +done: NPY_END_THREADS_DESCR(PyArray_DESCR(ret)); if (PyErr_Occurred()) { goto clean_ret; @@ -5033,6 +5039,24 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { goto err; } + /* + * Initialize the default PyDataMem_Handler capsule singleton. + */ + PyDataMem_DefaultHandler = PyCapsule_New( + &default_handler, MEM_HANDLER_CAPSULE_NAME, NULL); + if (PyDataMem_DefaultHandler == NULL) { + goto err; + } + + /* + * Initialize the context-local current handler + * with the default PyDataMem_Handler capsule. + */ + current_handler = PyContextVar_New("current_allocator", PyDataMem_DefaultHandler); + if (current_handler == NULL) { + goto err; + } + if (initumath(m) != 0) { goto err; } @@ -5067,7 +5091,7 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { * init_string_dtype() but that needs to happen after * the legacy dtypemeta classes are available. 
*/ - + if (npy_cache_import_runtime( "numpy.dtypes", "_add_dtype_helper", &npy_runtime_imports._add_dtype_helper) == -1) { @@ -5081,23 +5105,6 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { } PyDict_SetItemString(d, "StringDType", (PyObject *)&PyArray_StringDType); - /* - * Initialize the default PyDataMem_Handler capsule singleton. - */ - PyDataMem_DefaultHandler = PyCapsule_New( - &default_handler, MEM_HANDLER_CAPSULE_NAME, NULL); - if (PyDataMem_DefaultHandler == NULL) { - goto err; - } - /* - * Initialize the context-local current handler - * with the default PyDataMem_Handler capsule. - */ - current_handler = PyContextVar_New("current_allocator", PyDataMem_DefaultHandler); - if (current_handler == NULL) { - goto err; - } - // initialize static reference to a zero-like array npy_static_pydata.zero_pyint_like_arr = PyArray_ZEROS( 0, NULL, NPY_DEFAULT_INT, NPY_FALSE); diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c index 2cc6ea72c26e..62e1fd3c1b15 100644 --- a/numpy/_core/src/multiarray/npy_static_data.c +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -184,6 +184,22 @@ initialize_static_globals(void) return -1; } + npy_static_pydata.dl_call_kwnames = + Py_BuildValue("(sss)", "dl_device", "copy", "max_version"); + if (npy_static_pydata.dl_call_kwnames == NULL) { + return -1; + } + + npy_static_pydata.dl_cpu_device_tuple = Py_BuildValue("(i,i)", 1, 0); + if (npy_static_pydata.dl_cpu_device_tuple == NULL) { + return -1; + } + + npy_static_pydata.dl_max_version = Py_BuildValue("(i,i)", 1, 0); + if (npy_static_pydata.dl_max_version == NULL) { + return -1; + } + /* * Initialize contents of npy_static_cdata struct * diff --git a/numpy/_core/src/multiarray/npy_static_data.h b/numpy/_core/src/multiarray/npy_static_data.h index 45e3fa0e151a..287dc80e4c1f 100644 --- a/numpy/_core/src/multiarray/npy_static_data.h +++ b/numpy/_core/src/multiarray/npy_static_data.h @@ -1,6 +1,10 @@ #ifndef 
NUMPY_CORE_SRC_MULTIARRAY_STATIC_DATA_H_ #define NUMPY_CORE_SRC_MULTIARRAY_STATIC_DATA_H_ +#ifdef __cplusplus +extern "C" { +#endif + NPY_NO_EXPORT int initialize_static_globals(void); @@ -134,6 +138,13 @@ typedef struct npy_static_pydata_struct { PyObject *GenericToVoidMethod; PyObject *ObjectToGenericMethod; PyObject *GenericToObjectMethod; + + /* + * Used in from_dlpack + */ + PyObject *dl_call_kwnames; + PyObject *dl_cpu_device_tuple; + PyObject *dl_max_version; } npy_static_pydata_struct; @@ -168,4 +179,8 @@ NPY_VISIBILITY_HIDDEN extern npy_interned_str_struct npy_interned_str; NPY_VISIBILITY_HIDDEN extern npy_static_pydata_struct npy_static_pydata; NPY_VISIBILITY_HIDDEN extern npy_static_cdata_struct npy_static_cdata; +#ifdef __cplusplus +} +#endif + #endif // NUMPY_CORE_SRC_MULTIARRAY_STATIC_DATA_H_ diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 81a846bf6d96..0599a48ad9af 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -270,6 +270,15 @@ as_pystring(PyObject *scalar, int coerce) "string coercion is disabled."); return NULL; } + else if (scalar_type == &PyBytes_Type) { + // assume UTF-8 encoding + char *buffer; + Py_ssize_t length; + if (PyBytes_AsStringAndSize(scalar, &buffer, &length) < 0) { + return NULL; + } + return PyUnicode_FromStringAndSize(buffer, length); + } else { // attempt to coerce to str scalar = PyObject_Str(scalar); @@ -624,11 +633,16 @@ PyArray_Descr * stringdtype_finalize_descr(PyArray_Descr *dtype) { PyArray_StringDTypeObject *sdtype = (PyArray_StringDTypeObject *)dtype; + // acquire the allocator lock in case the descriptor we want to finalize + // is shared between threads, see gh-28813 + npy_string_allocator *allocator = NpyString_acquire_allocator(sdtype); if (sdtype->array_owned == 0) { sdtype->array_owned = 1; + NpyString_release_allocator(allocator); Py_INCREF(dtype); return dtype; } + 
NpyString_release_allocator(allocator); PyArray_StringDTypeObject *ret = (PyArray_StringDTypeObject *)new_stringdtype_instance( sdtype->na_object, sdtype->coerce); ret->array_owned = 1; @@ -843,14 +857,17 @@ init_string_dtype(void) return -1; } - PyArray_Descr *singleton = - NPY_DT_CALL_default_descr(&PyArray_StringDType); + PyArray_StringDTypeObject *singleton = + (PyArray_StringDTypeObject *)NPY_DT_CALL_default_descr(&PyArray_StringDType); if (singleton == NULL) { return -1; } - PyArray_StringDType.singleton = singleton; + // never associate the singleton with an array + singleton->array_owned = 1; + + PyArray_StringDType.singleton = (PyArray_Descr *)singleton; PyArray_StringDType.type_num = NPY_VSTRING; for (int i = 0; PyArray_StringDType_casts[i] != NULL; i++) { diff --git a/numpy/_core/src/npysort/highway_qsort.dispatch.cpp b/numpy/_core/src/npysort/highway_qsort.dispatch.cpp index 194a81e2d7e9..2893e817af08 100644 --- a/numpy/_core/src/npysort/highway_qsort.dispatch.cpp +++ b/numpy/_core/src/npysort/highway_qsort.dispatch.cpp @@ -1,24 +1,27 @@ -#include "highway_qsort.hpp" #define VQSORT_ONLY_STATIC 1 +#include "hwy/highway.h" #include "hwy/contrib/sort/vqsort-inl.h" -#if VQSORT_ENABLED - -#define DISPATCH_VQSORT(TYPE) \ -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(TYPE *arr, intptr_t size) \ -{ \ - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); \ -} \ +#include "highway_qsort.hpp" +#include "quicksort.hpp" -namespace np { namespace highway { namespace qsort_simd { +namespace np::highway::qsort_simd { +template +void NPY_CPU_DISPATCH_CURFX(QSort)(T *arr, npy_intp size) +{ +#if VQSORT_ENABLED + hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); +#else + sort::Quick(arr, size); +#endif +} - DISPATCH_VQSORT(int32_t) - DISPATCH_VQSORT(uint32_t) - DISPATCH_VQSORT(int64_t) - DISPATCH_VQSORT(uint64_t) - DISPATCH_VQSORT(double) - DISPATCH_VQSORT(float) +template void NPY_CPU_DISPATCH_CURFX(QSort)(int32_t*, npy_intp); +template 
void NPY_CPU_DISPATCH_CURFX(QSort)(uint32_t*, npy_intp); +template void NPY_CPU_DISPATCH_CURFX(QSort)(int64_t*, npy_intp); +template void NPY_CPU_DISPATCH_CURFX(QSort)(uint64_t*, npy_intp); +template void NPY_CPU_DISPATCH_CURFX(QSort)(float*, npy_intp); +template void NPY_CPU_DISPATCH_CURFX(QSort)(double*, npy_intp); -} } } // np::highway::qsort_simd +} // np::highway::qsort_simd -#endif // VQSORT_ENABLED diff --git a/numpy/_core/src/npysort/highway_qsort.hpp b/numpy/_core/src/npysort/highway_qsort.hpp index ba3fe4920594..b52e6da2b621 100644 --- a/numpy/_core/src/npysort/highway_qsort.hpp +++ b/numpy/_core/src/npysort/highway_qsort.hpp @@ -1,38 +1,20 @@ #ifndef NUMPY_SRC_COMMON_NPYSORT_HWY_SIMD_QSORT_HPP #define NUMPY_SRC_COMMON_NPYSORT_HWY_SIMD_QSORT_HPP -#include "hwy/highway.h" - #include "common.hpp" -// This replicates VQSORT_ENABLED from hwy/contrib/sort/shared-inl.h -// without checking the scalar target as this is not built within the dynamic -// dispatched sources. -#if (HWY_COMPILER_MSVC && !HWY_IS_DEBUG_BUILD) || \ - (HWY_ARCH_ARM_V7 && HWY_IS_DEBUG_BUILD) || \ - (HWY_ARCH_ARM_A64 && HWY_COMPILER_GCC_ACTUAL && HWY_IS_ASAN) || \ - (HWY_ARCH_ARM_A64 && HWY_COMPILER_CLANG && \ - (HWY_IS_HWASAN || HWY_IS_MSAN || HWY_IS_TSAN || HWY_IS_ASAN)) -#define NPY_DISABLE_HIGHWAY_SORT -#endif - -#ifndef NPY_DISABLE_HIGHWAY_SORT -namespace np { namespace highway { namespace qsort_simd { +namespace np::highway::qsort_simd { #ifndef NPY_DISABLE_OPTIMIZATION #include "highway_qsort.dispatch.h" #endif NPY_CPU_DISPATCH_DECLARE(template void QSort, (T *arr, npy_intp size)) -NPY_CPU_DISPATCH_DECLARE(template void QSelect, (T* arr, npy_intp num, npy_intp kth)) - #ifndef NPY_DISABLE_OPTIMIZATION #include "highway_qsort_16bit.dispatch.h" #endif NPY_CPU_DISPATCH_DECLARE(template void QSort, (T *arr, npy_intp size)) -NPY_CPU_DISPATCH_DECLARE(template void QSelect, (T* arr, npy_intp num, npy_intp kth)) -} } } // np::highway::qsort_simd +} // np::highway::qsort_simd #endif // 
NUMPY_SRC_COMMON_NPYSORT_HWY_SIMD_QSORT_HPP -#endif // NPY_DISABLE_HIGHWAY_SORT diff --git a/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp b/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp index d069cb6373d0..a7466709654d 100644 --- a/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp +++ b/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp @@ -1,30 +1,33 @@ -#include "highway_qsort.hpp" #define VQSORT_ONLY_STATIC 1 +#include "hwy/highway.h" #include "hwy/contrib/sort/vqsort-inl.h" +#include "highway_qsort.hpp" #include "quicksort.hpp" -#if VQSORT_ENABLED - -namespace np { namespace highway { namespace qsort_simd { - -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(Half *arr, intptr_t size) +namespace np::highway::qsort_simd { +template +void NPY_CPU_DISPATCH_CURFX(QSort)(T *arr, npy_intp size) { -#if HWY_HAVE_FLOAT16 - hwy::HWY_NAMESPACE::VQSortStatic(reinterpret_cast(arr), size, hwy::SortAscending()); +#if VQSORT_ENABLED + using THwy = std::conditional_t, hwy::float16_t, T>; + hwy::HWY_NAMESPACE::VQSortStatic(reinterpret_cast(arr), size, hwy::SortAscending()); #else sort::Quick(arr, size); #endif } -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(uint16_t *arr, intptr_t size) -{ - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(int16_t *arr, intptr_t size) +#if !HWY_HAVE_FLOAT16 +template <> +void NPY_CPU_DISPATCH_CURFX(QSort)(Half *arr, npy_intp size) { - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); + sort::Quick(arr, size); } +#endif // !HWY_HAVE_FLOAT16 -} } } // np::highway::qsort_simd +template void NPY_CPU_DISPATCH_CURFX(QSort)(int16_t*, npy_intp); +template void NPY_CPU_DISPATCH_CURFX(QSort)(uint16_t*, npy_intp); +#if HWY_HAVE_FLOAT16 +template void NPY_CPU_DISPATCH_CURFX(QSort)(Half*, npy_intp); +#endif -#endif // VQSORT_ENABLED +} // np::highway::qsort_simd diff --git a/numpy/_core/src/npysort/quicksort.cpp 
b/numpy/_core/src/npysort/quicksort.cpp index aca748056f39..15e5668f599d 100644 --- a/numpy/_core/src/npysort/quicksort.cpp +++ b/numpy/_core/src/npysort/quicksort.cpp @@ -84,7 +84,7 @@ inline bool quicksort_dispatch(T *start, npy_intp num) #if defined(NPY_CPU_AMD64) || defined(NPY_CPU_X86) // x86 32-bit and 64-bit #include "x86_simd_qsort_16bit.dispatch.h" NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::qsort_simd::template QSort, ); - #elif !defined(NPY_DISABLE_HIGHWAY_SORT) + #else #include "highway_qsort_16bit.dispatch.h" NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::highway::qsort_simd::template QSort, ); #endif @@ -95,7 +95,7 @@ inline bool quicksort_dispatch(T *start, npy_intp num) #if defined(NPY_CPU_AMD64) || defined(NPY_CPU_X86) // x86 32-bit and 64-bit #include "x86_simd_qsort.dispatch.h" NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::qsort_simd::template QSort, ); - #elif !defined(NPY_DISABLE_HIGHWAY_SORT) + #else #include "highway_qsort.dispatch.h" NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::highway::qsort_simd::template QSort, ); #endif diff --git a/numpy/_core/src/umath/dispatching.c b/numpy/_core/src/umath/dispatching.cpp similarity index 94% rename from numpy/_core/src/umath/dispatching.c rename to numpy/_core/src/umath/dispatching.cpp index 9e465dbe72a5..1bbdc4adb7d1 100644 --- a/numpy/_core/src/umath/dispatching.c +++ b/numpy/_core/src/umath/dispatching.cpp @@ -38,6 +38,9 @@ #define _MULTIARRAYMODULE #define _UMATHMODULE +#include +#include + #define PY_SSIZE_T_CLEAN #include #include @@ -504,8 +507,9 @@ call_promoter_and_recurse(PyUFuncObject *ufunc, PyObject *info, PyObject *promoter = PyTuple_GET_ITEM(info, 1); if (PyCapsule_CheckExact(promoter)) { /* We could also go the other way and wrap up the python function... 
*/ - PyArrayMethod_PromoterFunction *promoter_function = PyCapsule_GetPointer( - promoter, "numpy._ufunc_promoter"); + PyArrayMethod_PromoterFunction *promoter_function = + (PyArrayMethod_PromoterFunction *)PyCapsule_GetPointer( + promoter, "numpy._ufunc_promoter"); if (promoter_function == NULL) { return NULL; } @@ -770,8 +774,9 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, * 2. Check all registered loops/promoters to find the best match. * 3. Fall back to the legacy implementation if no match was found. */ - PyObject *info = PyArrayIdentityHash_GetItem(ufunc->_dispatch_cache, - (PyObject **)op_dtypes); + PyObject *info = PyArrayIdentityHash_GetItem( + (PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes); if (info != NULL && PyObject_TypeCheck( PyTuple_GET_ITEM(info, 1), &PyArrayMethod_Type)) { /* Found the ArrayMethod and NOT a promoter: return it */ @@ -793,8 +798,9 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, * Found the ArrayMethod and NOT promoter. Before returning it * add it to the cache for faster lookup in the future. 
*/ - if (PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache, - (PyObject **)op_dtypes, info, 0) < 0) { + if (PyArrayIdentityHash_SetItem( + (PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes, info, 0) < 0) { return NULL; } return info; @@ -815,8 +821,9 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, } else if (info != NULL) { /* Add result to the cache using the original types: */ - if (PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache, - (PyObject **)op_dtypes, info, 0) < 0) { + if (PyArrayIdentityHash_SetItem( + (PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes, info, 0) < 0) { return NULL; } return info; @@ -882,13 +889,55 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, } /* Add this to the cache using the original types: */ - if (cacheable && PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache, - (PyObject **)op_dtypes, info, 0) < 0) { + if (cacheable && PyArrayIdentityHash_SetItem( + (PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes, info, 0) < 0) { return NULL; } return info; } +#ifdef Py_GIL_DISABLED +/* + * Fast path for promote_and_get_info_and_ufuncimpl. 
+ * Acquires a read lock to check for a cache hit and then + * only acquires a write lock on a cache miss to fill the cache + */ +static inline PyObject * +promote_and_get_info_and_ufuncimpl_with_locking( + PyUFuncObject *ufunc, + PyArrayObject *const ops[], + PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *op_dtypes[], + npy_bool legacy_promotion_is_possible) +{ + std::shared_mutex *mutex = ((std::shared_mutex *)((PyArrayIdentityHash *)ufunc->_dispatch_cache)->mutex); + NPY_BEGIN_ALLOW_THREADS + mutex->lock_shared(); + NPY_END_ALLOW_THREADS + PyObject *info = PyArrayIdentityHash_GetItem( + (PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes); + mutex->unlock_shared(); + + if (info != NULL && PyObject_TypeCheck( + PyTuple_GET_ITEM(info, 1), &PyArrayMethod_Type)) { + /* Found the ArrayMethod and NOT a promoter: return it */ + return info; + } + + // cache miss, need to acquire a write lock and recursively calculate the + // correct dispatch resolution + NPY_BEGIN_ALLOW_THREADS + mutex->lock(); + NPY_END_ALLOW_THREADS + info = promote_and_get_info_and_ufuncimpl(ufunc, + ops, signature, op_dtypes, legacy_promotion_is_possible); + mutex->unlock(); + + return info; +} +#endif /** * The central entry-point for the promotion and dispatching machinery. 
@@ -941,6 +990,8 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, { int nin = ufunc->nin, nargs = ufunc->nargs; npy_bool legacy_promotion_is_possible = NPY_TRUE; + PyObject *all_dtypes = NULL; + PyArrayMethodObject *method = NULL; /* * Get the actual DTypes we operate with by setting op_dtypes[i] from @@ -976,18 +1027,20 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, } } - PyObject *info; - Py_BEGIN_CRITICAL_SECTION((PyObject *)ufunc); - info = promote_and_get_info_and_ufuncimpl(ufunc, +#ifdef Py_GIL_DISABLED + PyObject *info = promote_and_get_info_and_ufuncimpl_with_locking(ufunc, + ops, signature, op_dtypes, legacy_promotion_is_possible); +#else + PyObject *info = promote_and_get_info_and_ufuncimpl(ufunc, ops, signature, op_dtypes, legacy_promotion_is_possible); - Py_END_CRITICAL_SECTION(); +#endif if (info == NULL) { goto handle_error; } - PyArrayMethodObject *method = (PyArrayMethodObject *)PyTuple_GET_ITEM(info, 1); - PyObject *all_dtypes = PyTuple_GET_ITEM(info, 0); + method = (PyArrayMethodObject *)PyTuple_GET_ITEM(info, 1); + all_dtypes = PyTuple_GET_ITEM(info, 0); /* * In certain cases (only the logical ufuncs really), the loop we found may @@ -1218,7 +1271,7 @@ install_logical_ufunc_promoter(PyObject *ufunc) if (dtype_tuple == NULL) { return -1; } - PyObject *promoter = PyCapsule_New(&logical_ufunc_promoter, + PyObject *promoter = PyCapsule_New((void *)&logical_ufunc_promoter, "numpy._ufunc_promoter", NULL); if (promoter == NULL) { Py_DECREF(dtype_tuple); diff --git a/numpy/_core/src/umath/dispatching.h b/numpy/_core/src/umath/dispatching.h index 9bb5fbd9b013..95bcb32bf0ce 100644 --- a/numpy/_core/src/umath/dispatching.h +++ b/numpy/_core/src/umath/dispatching.h @@ -43,6 +43,10 @@ object_only_ufunc_promoter(PyObject *ufunc, NPY_NO_EXPORT int install_logical_ufunc_promoter(PyObject *ufunc); +NPY_NO_EXPORT PyObject * +get_info_no_cast(PyUFuncObject *ufunc, PyArray_DTypeMeta *op_dtype, + int ndtypes); + #ifdef __cplusplus } #endif diff --git 
a/numpy/_core/src/umath/legacy_array_method.c b/numpy/_core/src/umath/legacy_array_method.c index 9592df0e1366..705262fedd38 100644 --- a/numpy/_core/src/umath/legacy_array_method.c +++ b/numpy/_core/src/umath/legacy_array_method.c @@ -311,7 +311,7 @@ get_initial_from_ufunc( } } else if (context->descriptors[0]->type_num == NPY_OBJECT - && !reduction_is_empty) { + && !reduction_is_empty) { /* Allows `sum([object()])` to work, but use 0 when empty. */ Py_DECREF(identity_obj); return 0; @@ -323,13 +323,6 @@ get_initial_from_ufunc( return -1; } - if (PyTypeNum_ISNUMBER(context->descriptors[0]->type_num)) { - /* For numbers we can cache to avoid going via Python ints */ - memcpy(context->method->legacy_initial, initial, - context->descriptors[0]->elsize); - context->method->get_reduction_initial = ©_cached_initial; - } - /* Reduction can use the initial value */ return 1; } @@ -427,11 +420,47 @@ PyArray_NewLegacyWrappingArrayMethod(PyUFuncObject *ufunc, }; PyBoundArrayMethodObject *bound_res = PyArrayMethod_FromSpec_int(&spec, 1); + if (bound_res == NULL) { return NULL; } PyArrayMethodObject *res = bound_res->method; + + // set cached initial value for numeric reductions to avoid creating + // a python int in every reduction + if (PyTypeNum_ISNUMBER(bound_res->dtypes[0]->type_num) && + ufunc->nin == 2 && ufunc->nout == 1) { + + PyArray_Descr *descrs[3]; + + for (int i = 0; i < 3; i++) { + // only dealing with numeric legacy dtypes so this should always be + // valid + descrs[i] = bound_res->dtypes[i]->singleton; + } + + PyArrayMethod_Context context = { + (PyObject *)ufunc, + bound_res->method, + descrs, + }; + + int ret = get_initial_from_ufunc(&context, 0, context.method->legacy_initial); + + if (ret < 0) { + Py_DECREF(bound_res); + return NULL; + } + + // only use the cached initial value if it's valid + if (ret > 0) { + context.method->get_reduction_initial = ©_cached_initial; + } + } + + Py_INCREF(res); Py_DECREF(bound_res); + return res; } diff --git 
a/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src b/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src index a4acc4437b1b..190ea6b8be72 100644 --- a/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src +++ b/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src @@ -1074,10 +1074,14 @@ AVX512F_log_DOUBLE(npy_double * op, _mm512_mask_storeu_pd(op, load_mask, res); } - /* call glibc's log func when x around 1.0f */ + /* call glibc's log func when x around 1.0f. */ if (glibc_mask != 0) { double NPY_DECL_ALIGNED(64) ip_fback[8]; - _mm512_store_pd(ip_fback, x_in); + /* Using a mask_store_pd instead of store_pd to prevent a fatal + * compiler optimization bug. See + * https://github.com/numpy/numpy/issues/27745#issuecomment-2498684564 + * for details.*/ + _mm512_mask_store_pd(ip_fback, avx512_get_full_load_mask_pd(), x_in); for (int ii = 0; ii < 8; ++ii, glibc_mask >>= 1) { if (glibc_mask & 0x01) { diff --git a/numpy/_core/src/umath/matmul.c.src b/numpy/_core/src/umath/matmul.c.src index 37f990f970ed..f0f8b2f4153f 100644 --- a/numpy/_core/src/umath/matmul.c.src +++ b/numpy/_core/src/umath/matmul.c.src @@ -81,9 +81,9 @@ static const npy_cfloat oneF = 1.0f, zeroF = 0.0f; */ NPY_NO_EXPORT void @name@_gemv(void *ip1, npy_intp is1_m, npy_intp is1_n, - void *ip2, npy_intp is2_n, npy_intp NPY_UNUSED(is2_p), - void *op, npy_intp op_m, npy_intp NPY_UNUSED(op_p), - npy_intp m, npy_intp n, npy_intp NPY_UNUSED(p)) + void *ip2, npy_intp is2_n, + void *op, npy_intp op_m, + npy_intp m, npy_intp n) { /* * Vector matrix multiplication -- Level 2 BLAS @@ -465,13 +465,12 @@ NPY_NO_EXPORT void op, os_m, os_p, dm, dn, dp); } else if (vector_matrix) { /* vector @ matrix, switch ip1, ip2, p and m */ - @TYPE@_gemv(ip2, is2_p, is2_n, ip1, is1_n, is1_m, - op, os_p, os_m, dp, dn, dm); + @TYPE@_gemv(ip2, is2_p, is2_n, ip1, is1_n, + op, os_p, dp, dn); } else if (matrix_vector) { /* matrix @ vector */ - @TYPE@_gemv(ip1, is1_m, is1_n, ip2, is2_n, is2_p, - - op, os_m, os_p, dm, dn, 
dp); + @TYPE@_gemv(ip1, is1_m, is1_n, ip2, is2_n, + op, os_m, dm, dn); } else { /* column @ row, 2d output, no blas needed or non-blas-able input */ @TYPE@_matmul_inner_noblas(ip1, is1_m, is1_n, @@ -655,3 +654,174 @@ NPY_NO_EXPORT void } } /**end repeat**/ + +#if defined(HAVE_CBLAS) +/* + * Blas complex vector-matrix product via gemm (gemv cannot conjugate the vector). + */ +/**begin repeat + * + * #name = CFLOAT, CDOUBLE# + * #typ = npy_cfloat, npy_cdouble# + * #prefix = c, z# + * #step1 = &oneF, &oneD# + * #step0 = &zeroF, &zeroD# + */ +NPY_NO_EXPORT void +@name@_vecmat_via_gemm(void *ip1, npy_intp is1_n, + void *ip2, npy_intp is2_n, npy_intp is2_m, + void *op, npy_intp os_m, + npy_intp n, npy_intp m) +{ + enum CBLAS_ORDER order = CblasRowMajor; + enum CBLAS_TRANSPOSE trans1, trans2; + CBLAS_INT N, M, lda, ldb, ldc; + assert(n <= BLAS_MAXSIZE && m <= BLAS_MAXSIZE); + N = (CBLAS_INT)n; + M = (CBLAS_INT)m; + + assert(os_m == sizeof(@typ@)); + ldc = (CBLAS_INT)m; + + assert(is_blasable2d(is1_n, sizeof(@typ@), n, 1, sizeof(@typ@))); + trans1 = CblasConjTrans; + lda = (CBLAS_INT)(is1_n / sizeof(@typ@)); + + if (is_blasable2d(is2_n, is2_m, n, m, sizeof(@typ@))) { + trans2 = CblasNoTrans; + ldb = (CBLAS_INT)(is2_n / sizeof(@typ@)); + } + else { + assert(is_blasable2d(is2_m, is2_n, m, n, sizeof(@typ@))); + trans2 = CblasTrans; + ldb = (CBLAS_INT)(is2_m / sizeof(@typ@)); + } + CBLAS_FUNC(cblas_@prefix@gemm)( + order, trans1, trans2, 1, M, N, @step1@, ip1, lda, + ip2, ldb, @step0@, op, ldc); +} +/**end repeat**/ +#endif + +/* + * matvec loops, using blas gemv if possible, and TYPE_dot implementations otherwise. 
+ * signature is (m,n),(n)->(m) + */ +/**begin repeat + * #TYPE = FLOAT, DOUBLE, LONGDOUBLE, HALF, + * CFLOAT, CDOUBLE, CLONGDOUBLE, + * UBYTE, USHORT, UINT, ULONG, ULONGLONG, + * BYTE, SHORT, INT, LONG, LONGLONG, + * BOOL, OBJECT# + * #typ = npy_float,npy_double,npy_longdouble, npy_half, + * npy_cfloat, npy_cdouble, npy_clongdouble, + * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong, + * npy_byte, npy_short, npy_int, npy_long, npy_longlong, + * npy_bool, npy_object# + * #USEBLAS = 1, 1, 0, 0, 1, 1, 0*13# + * #CHECK_PYERR = 0*18, 1# + */ +NPY_NO_EXPORT void +@TYPE@_matvec(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) +{ + npy_intp n_outer = dimensions[0]; + npy_intp s0=steps[0], s1=steps[1], s2=steps[2]; + npy_intp dm = dimensions[1], dn = dimensions[2]; + npy_intp is1_m=steps[3], is1_n=steps[4], is2_n=steps[5], os_m=steps[6]; +#if @USEBLAS@ && defined(HAVE_CBLAS) + npy_bool too_big_for_blas = (dm > BLAS_MAXSIZE || dn > BLAS_MAXSIZE); + npy_bool i1_c_blasable = is_blasable2d(is1_m, is1_n, dm, dn, sizeof(@typ@)); + npy_bool i1_f_blasable = is_blasable2d(is1_n, is1_m, dn, dm, sizeof(@typ@)); + npy_bool i2_blasable = is_blasable2d(is2_n, sizeof(@typ@), dn, 1, sizeof(@typ@)); + npy_bool blasable = ((i1_c_blasable || i1_f_blasable) && i2_blasable + && !too_big_for_blas && dn > 1 && dm > 1); +#endif + for (npy_intp i = 0; i < n_outer; i++, + args[0] += s0, args[1] += s1, args[2] += s2) { + char *ip1=args[0], *ip2=args[1], *op=args[2]; +#if @USEBLAS@ && defined(HAVE_CBLAS) + if (blasable) { + @TYPE@_gemv(ip1, is1_m, is1_n, ip2, is2_n, op, os_m, dm, dn); + continue; + } +#endif + /* + * Dot the different matrix rows with the vector to get output elements. 
+ * (no conjugation for complex, unlike vecdot and vecmat) + */ + for (npy_intp j = 0; j < dm; j++, ip1 += is1_m, op += os_m) { + @TYPE@_dot(ip1, is1_n, ip2, is2_n, op, dn, NULL); +#if @CHECK_PYERR@ + if (PyErr_Occurred()) { + return; + } +#endif + } + } +} +/**end repeat**/ + +/* + * vecmat loops, using blas gemv for float and gemm for complex if possible, + * and TYPE_dot[c] implementations otherwise. + * Note that we cannot use gemv for complex, since we need to conjugate the vector. + * signature is (n),(n,m)->(m) + */ +/**begin repeat + * #TYPE = FLOAT, DOUBLE, LONGDOUBLE, HALF, + * CFLOAT, CDOUBLE, CLONGDOUBLE, + * UBYTE, USHORT, UINT, ULONG, ULONGLONG, + * BYTE, SHORT, INT, LONG, LONGLONG, + * BOOL, OBJECT# + * #typ = npy_float,npy_double,npy_longdouble, npy_half, + * npy_cfloat, npy_cdouble, npy_clongdouble, + * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong, + * npy_byte, npy_short, npy_int, npy_long, npy_longlong, + * npy_bool, npy_object# + * #USEBLAS = 1, 1, 0, 0, 1, 1, 0*13# + * #COMPLEX = 0*4, 1*3, 0*11, 1# + * #DOT = dot*4, dotc*3, dot*11, dotc# + * #CHECK_PYERR = 0*18, 1# + */ +NPY_NO_EXPORT void +@TYPE@_vecmat(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) +{ + npy_intp n_outer = dimensions[0]; + npy_intp s0=steps[0], s1=steps[1], s2=steps[2]; + npy_intp dn = dimensions[1], dm = dimensions[2]; + npy_intp is1_n=steps[3], is2_n=steps[4], is2_m=steps[5], os_m=steps[6]; +#if @USEBLAS@ && defined(HAVE_CBLAS) + npy_bool too_big_for_blas = (dm > BLAS_MAXSIZE || dn > BLAS_MAXSIZE); + npy_bool i1_blasable = is_blasable2d(is1_n, sizeof(@typ@), dn, 1, sizeof(@typ@)); + npy_bool i2_c_blasable = is_blasable2d(is2_n, is2_m, dn, dm, sizeof(@typ@)); + npy_bool i2_f_blasable = is_blasable2d(is2_m, is2_n, dm, dn, sizeof(@typ@)); + npy_bool blasable = (i1_blasable && (i2_c_blasable || i2_f_blasable) + && !too_big_for_blas && dn > 1 && dm > 1); +#endif + for (npy_intp i = 0; i < n_outer; i++, + args[0] += s0, args[1] 
+= s1, args[2] += s2) { + char *ip1=args[0], *ip2=args[1], *op=args[2]; +#if @USEBLAS@ && defined(HAVE_CBLAS) + if (blasable) { +#if @COMPLEX@ + /* For complex, use gemm so we can conjugate the vector */ + @TYPE@_vecmat_via_gemm(ip1, is1_n, ip2, is2_n, is2_m, op, os_m, dn, dm); +#else + /* For float, use gemv (hence flipped order) */ + @TYPE@_gemv(ip2, is2_m, is2_n, ip1, is1_n, op, os_m, dm, dn); +#endif + continue; + } +#endif + /* Dot the vector with different matrix columns to get output elements. */ + for (npy_intp j = 0; j < dm; j++, ip2 += is2_m, op += os_m) { + @TYPE@_@DOT@(ip1, is1_n, ip2, is2_n, op, dn, NULL); +#if @CHECK_PYERR@ + if (PyErr_Occurred()) { + return; + } +#endif + } + } +} +/**end repeat**/ diff --git a/numpy/_core/src/umath/matmul.h.src b/numpy/_core/src/umath/matmul.h.src index df3f549a545a..bff3d73c8993 100644 --- a/numpy/_core/src/umath/matmul.h.src +++ b/numpy/_core/src/umath/matmul.h.src @@ -7,15 +7,10 @@ **/ NPY_NO_EXPORT void @TYPE@_matmul(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); -/**end repeat**/ - -/**begin repeat - * #TYPE = FLOAT, DOUBLE, LONGDOUBLE, HALF, - * CFLOAT, CDOUBLE, CLONGDOUBLE, - * UBYTE, USHORT, UINT, ULONG, ULONGLONG, - * BYTE, SHORT, INT, LONG, LONGLONG, - * BOOL, OBJECT# - */ NPY_NO_EXPORT void @TYPE@_vecdot(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +@TYPE@_matvec(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +@TYPE@_vecmat(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); /**end repeat**/ diff --git a/numpy/_core/src/umath/reduction.c b/numpy/_core/src/umath/reduction.c index 548530e1ca3b..1d3937eee1eb 100644 --- a/numpy/_core/src/umath/reduction.c +++ b/numpy/_core/src/umath/reduction.c @@ -218,10 +218,13 @@ PyUFunc_ReduceWrapper(PyArrayMethod_Context *context, NPY_ITER_ZEROSIZE_OK | NPY_ITER_REFS_OK 
| NPY_ITER_DELAY_BUFALLOC | + /* + * stride negation (if reorderable) could currently misalign the + * first-visit and initial value copy logic. + */ + NPY_ITER_DONT_NEGATE_STRIDES | NPY_ITER_COPY_IF_OVERLAP; - if (!(context->method->flags & NPY_METH_IS_REORDERABLE)) { - it_flags |= NPY_ITER_DONT_NEGATE_STRIDES; - } + op_flags[0] = NPY_ITER_READWRITE | NPY_ITER_ALIGNED | NPY_ITER_ALLOCATE | diff --git a/numpy/_core/src/umath/string_fastsearch.h b/numpy/_core/src/umath/string_fastsearch.h index 54092d8b293d..95d0ee4fb214 100644 --- a/numpy/_core/src/umath/string_fastsearch.h +++ b/numpy/_core/src/umath/string_fastsearch.h @@ -670,16 +670,8 @@ preprocess(CheckedIndexer needle, Py_ssize_t len_needle, assert(p->period + p->cut <= len_needle); // Compare parts of the needle to check for periodicity. - int cmp; - if (std::is_same::value) { - cmp = memcmp(needle.buffer, - needle.buffer + (p->period * sizeof(npy_ucs4)), - (size_t) p->cut); - } - else { - cmp = memcmp(needle.buffer, needle.buffer + p->period, - (size_t) p->cut); - } + int cmp = memcmp(needle.buffer, needle.buffer + p->period, + (size_t) p->cut); p->is_periodic = (0 == cmp); // If periodic, gap is unused; otherwise, calculate period and gap. 
diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index 2bc4ce20acd6..0e28240ee5f0 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -643,6 +643,20 @@ string_addition_resolve_descriptors( PyArray_Descr *loop_descrs[3], npy_intp *NPY_UNUSED(view_offset)) { + npy_intp result_itemsize = given_descrs[0]->elsize + given_descrs[1]->elsize; + + /* NOTE: elsize can fit more than MAX_INT, but some code may still use ints */ + if (result_itemsize > NPY_MAX_INT || result_itemsize < 0) { + npy_intp length = result_itemsize; + if (given_descrs[0]->type == NPY_UNICODE) { + length /= 4; + } + PyErr_Format(PyExc_TypeError, + "addition result string of length %zd is too large to store inside array.", + length); + return _NPY_ERROR_OCCURRED_IN_CAST; + } + loop_descrs[0] = NPY_DT_CALL_ensure_canonical(given_descrs[0]); if (loop_descrs[0] == NULL) { return _NPY_ERROR_OCCURRED_IN_CAST; @@ -650,11 +664,14 @@ string_addition_resolve_descriptors( loop_descrs[1] = NPY_DT_CALL_ensure_canonical(given_descrs[1]); if (loop_descrs[1] == NULL) { + Py_DECREF(loop_descrs[0]); return _NPY_ERROR_OCCURRED_IN_CAST; } loop_descrs[2] = PyArray_DescrNew(loop_descrs[0]); if (loop_descrs[2] == NULL) { + Py_DECREF(loop_descrs[0]); + Py_DECREF(loop_descrs[1]); return _NPY_ERROR_OCCURRED_IN_CAST; } loop_descrs[2]->elsize += loop_descrs[1]->elsize; diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 8748ad5e4974..69bb0b1eb197 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -1108,7 +1108,7 @@ execute_ufunc_loop(PyArrayMethod_Context *context, int masked, * based on the fixed strides. 
*/ PyArrayMethod_StridedLoop *strided_loop; - NpyAuxData *auxdata; + NpyAuxData *auxdata = NULL; npy_intp fixed_strides[NPY_MAXARGS]; NpyIter_GetInnerFixedStrideArray(iter, fixed_strides); @@ -5963,7 +5963,6 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) NPY_AUXDATA_FREE(auxdata); Py_XDECREF(op2_array); - Py_XDECREF(iter); Py_XDECREF(iter2); for (int i = 0; i < nop; i++) { Py_XDECREF(operation_descrs[i]); @@ -5979,9 +5978,13 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) if (PyArray_FLAGS(op1_array) & NPY_ARRAY_WRITEBACKIFCOPY) { PyArray_DiscardWritebackIfCopy(op1_array); } + // iter might own the last refrence to op1_array, + // so it must be decref'd second + Py_XDECREF(iter); return NULL; } else { + Py_XDECREF(iter); Py_RETURN_NONE; } } diff --git a/numpy/_core/src/umath/ufunc_object.h b/numpy/_core/src/umath/ufunc_object.h index f8e522374394..dc55a561fba5 100644 --- a/numpy/_core/src/umath/ufunc_object.h +++ b/numpy/_core/src/umath/ufunc_object.h @@ -3,6 +3,9 @@ #include +#ifdef __cplusplus +extern "C" { +#endif NPY_NO_EXPORT const char* ufunc_get_name_cstr(PyUFuncObject *ufunc); @@ -10,4 +13,8 @@ ufunc_get_name_cstr(PyUFuncObject *ufunc); NPY_NO_EXPORT PyObject * PyUFunc_GetDefaultIdentity(PyUFuncObject *ufunc, npy_bool *reorderable); +#ifdef __cplusplus +} +#endif + #endif diff --git a/numpy/_core/src/umath/ufunc_type_resolution.h b/numpy/_core/src/umath/ufunc_type_resolution.h index 3f8e7505ea39..9e812e97d6fe 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.h +++ b/numpy/_core/src/umath/ufunc_type_resolution.h @@ -1,6 +1,10 @@ #ifndef _NPY_PRIVATE__UFUNC_TYPE_RESOLUTION_H_ #define _NPY_PRIVATE__UFUNC_TYPE_RESOLUTION_H_ +#ifdef __cplusplus +extern "C" { +#endif + NPY_NO_EXPORT int PyUFunc_SimpleBinaryComparisonTypeResolver(PyUFuncObject *ufunc, NPY_CASTING casting, @@ -142,4 +146,8 @@ PyUFunc_DefaultLegacyInnerLoopSelector(PyUFuncObject *ufunc, NPY_NO_EXPORT int raise_no_loop_found_error(PyUFuncObject *ufunc, PyObject **dtypes); +#ifdef 
__cplusplus +} +#endif + #endif diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index 87ab150adc31..b751b5d773a0 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -55,6 +55,7 @@ def _override___module__(): istitle, isupper, str_len, ]: ufunc.__module__ = "numpy.strings" + ufunc.__qualname__ = ufunc.__name__ _override___module__() diff --git a/numpy/_core/strings.pyi b/numpy/_core/strings.pyi index b6c15b5c3ca3..a1ed1ff2b9a5 100644 --- a/numpy/_core/strings.pyi +++ b/numpy/_core/strings.pyi @@ -12,10 +12,57 @@ from numpy._typing import ( _SupportsArray, ) +__all__ = [ + "add", + "capitalize", + "center", + "count", + "decode", + "encode", + "endswith", + "equal", + "expandtabs", + "find", + "greater", + "greater_equal", + "index", + "isalnum", + "isalpha", + "isdecimal", + "isdigit", + "islower", + "isnumeric", + "isspace", + "istitle", + "isupper", + "less", + "less_equal", + "ljust", + "lower", + "lstrip", + "mod", + "multiply", + "not_equal", + "partition", + "replace", + "rfind", + "rindex", + "rjust", + "rpartition", + "rstrip", + "startswith", + "str_len", + "strip", + "swapcase", + "title", + "translate", + "upper", + "zfill", +] _StringDTypeArray: TypeAlias = np.ndarray[_Shape, np.dtypes.StringDType] _StringDTypeSupportsArray: TypeAlias = _SupportsArray[np.dtypes.StringDType] -_StringDTypeOrUnicodeArray: TypeAlias = np.ndarray[_Shape, np.dtype[np.str_]] | np.ndarray[_Shape, np.dtypes.StringDType] +_StringDTypeOrUnicodeArray: TypeAlias = np.ndarray[_Shape, np.dtype[np.str_]] | _StringDTypeArray @overload def equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... @@ -66,7 +113,7 @@ def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ... @overload def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ... @overload -def add(x1: T_co, T_co) -> _StringDTypeOrUnicodeArray: ... +def add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ... 
@overload def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ... @@ -78,13 +125,13 @@ def multiply(a: _StringDTypeSupportsArray, i: i_co) -> _StringDTypeArray: ... def multiply(a: T_co, i: i_co) -> _StringDTypeOrUnicodeArray: ... @overload -def mod(a: U_co, value: Any) -> NDArray[np.str_]: ... +def mod(a: U_co, value: object) -> NDArray[np.str_]: ... @overload -def mod(a: S_co, value: Any) -> NDArray[np.bytes_]: ... +def mod(a: S_co, value: object) -> NDArray[np.bytes_]: ... @overload -def mod(a: _StringDTypeSupportsArray, value: Any) -> _StringDTypeArray: ... +def mod(a: _StringDTypeSupportsArray, value: object) -> _StringDTypeArray: ... @overload -def mod(a: T_co, value: Any) -> _StringDTypeOrUnicodeArray: ... +def mod(a: T_co, value: object) -> _StringDTypeOrUnicodeArray: ... def isalpha(x: UST_co) -> NDArray[np.bool]: ... def isalnum(a: UST_co) -> NDArray[np.bool]: ... @@ -147,14 +194,14 @@ def index( a: U_co, sub: U_co, start: i_co = ..., - end: None | i_co = ..., + end: i_co | None = ..., ) -> NDArray[np.int_]: ... @overload def index( a: S_co, sub: S_co, start: i_co = ..., - end: None | i_co = ..., + end: i_co | None = ..., ) -> NDArray[np.int_]: ... @overload def index( @@ -169,14 +216,14 @@ def rindex( a: U_co, sub: U_co, start: i_co = ..., - end: None | i_co = ..., + end: i_co | None = ..., ) -> NDArray[np.int_]: ... @overload def rindex( a: S_co, sub: S_co, start: i_co = ..., - end: None | i_co = ..., + end: i_co | None = ..., ) -> NDArray[np.int_]: ... @overload def rindex( @@ -225,7 +272,7 @@ def startswith( @overload def startswith( a: T_co, - suffix: T_co, + prefix: T_co, start: i_co = ..., end: i_co | None = ..., ) -> NDArray[np.bool]: ... @@ -254,13 +301,13 @@ def endswith( def decode( a: S_co, - encoding: None | str = ..., - errors: None | str = ..., + encoding: str | None = None, + errors: str | None = None, ) -> NDArray[np.str_]: ... 
def encode( a: U_co | T_co, - encoding: None | str = ..., - errors: None | str = ..., + encoding: str | None = None, + errors: str | None = None, ) -> NDArray[np.bytes_]: ... @overload @@ -273,74 +320,58 @@ def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = ...) -> _StringDTyp def expandtabs(a: T_co, tabsize: i_co = ...) -> _StringDTypeOrUnicodeArray: ... @overload -def center(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[np.str_]: ... +def center(a: U_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.str_]: ... @overload -def center(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[np.bytes_]: ... +def center(a: S_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.bytes_]: ... @overload -def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: UST_co = " ") -> _StringDTypeArray: ... @overload -def center(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ... +def center(a: T_co, width: i_co, fillchar: UST_co = " ") -> _StringDTypeOrUnicodeArray: ... @overload -def ljust(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[np.str_]: ... +def ljust(a: U_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.str_]: ... @overload -def ljust(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[np.bytes_]: ... +def ljust(a: S_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.bytes_]: ... @overload -def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: UST_co = " ") -> _StringDTypeArray: ... @overload -def ljust(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ... +def ljust(a: T_co, width: i_co, fillchar: UST_co = " ") -> _StringDTypeOrUnicodeArray: ... 
@overload -def rjust( - a: U_co, - width: i_co, - fillchar: U_co = ..., -) -> NDArray[np.str_]: ... +def rjust(a: U_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.str_]: ... @overload -def rjust( - a: S_co, - width: i_co, - fillchar: S_co = ..., -) -> NDArray[np.bytes_]: ... +def rjust(a: S_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.bytes_]: ... @overload -def rjust( - a: _StringDTypeSupportsArray, - width: i_co, - fillchar: _StringDTypeSupportsArray = ..., -) -> _StringDTypeArray: ... +def rjust(a: _StringDTypeSupportsArray, width: i_co, fillchar: UST_co = " ") -> _StringDTypeArray: ... @overload -def rjust( - a: T_co, - width: i_co, - fillchar: T_co = ..., -) -> _StringDTypeOrUnicodeArray: ... +def rjust(a: T_co, width: i_co, fillchar: UST_co = " ") -> _StringDTypeOrUnicodeArray: ... @overload -def lstrip(a: U_co, chars: None | U_co = ...) -> NDArray[np.str_]: ... +def lstrip(a: U_co, chars: U_co | None = None) -> NDArray[np.str_]: ... @overload -def lstrip(a: S_co, chars: None | S_co = ...) -> NDArray[np.bytes_]: ... +def lstrip(a: S_co, chars: S_co | None = None) -> NDArray[np.bytes_]: ... @overload -def lstrip(a: _StringDTypeSupportsArray, chars: None | _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +def lstrip(a: _StringDTypeSupportsArray, chars: T_co | None = None) -> _StringDTypeArray: ... @overload -def lstrip(a: T_co, chars: None | T_co = ...) -> _StringDTypeOrUnicodeArray: ... +def lstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... @overload -def rstrip(a: U_co, char: None | U_co = ...) -> NDArray[np.str_]: ... +def rstrip(a: U_co, chars: U_co | None = None) -> NDArray[np.str_]: ... @overload -def rstrip(a: S_co, char: None | S_co = ...) -> NDArray[np.bytes_]: ... +def rstrip(a: S_co, chars: S_co | None = None) -> NDArray[np.bytes_]: ... @overload -def rstrip(a: _StringDTypeSupportsArray, chars: None | _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... 
+def rstrip(a: _StringDTypeSupportsArray, chars: T_co | None = None) -> _StringDTypeArray: ... @overload -def rstrip(a: T_co, chars: None | T_co = ...) -> _StringDTypeOrUnicodeArray: ... +def rstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... @overload -def strip(a: U_co, chars: None | U_co = ...) -> NDArray[np.str_]: ... +def strip(a: U_co, chars: U_co | None = None) -> NDArray[np.str_]: ... @overload -def strip(a: S_co, chars: None | S_co = ...) -> NDArray[np.bytes_]: ... +def strip(a: S_co, chars: S_co | None = None) -> NDArray[np.bytes_]: ... @overload -def strip(a: _StringDTypeSupportsArray, chars: None | _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +def strip(a: _StringDTypeSupportsArray, chars: T_co | None = None) -> _StringDTypeArray: ... @overload -def strip(a: T_co, chars: None | T_co = ...) -> _StringDTypeOrUnicodeArray: ... +def strip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... @overload def zfill(a: U_co, width: i_co) -> NDArray[np.str_]: ... @@ -425,15 +456,6 @@ def replace( count: i_co = ..., ) -> _StringDTypeOrUnicodeArray: ... -@overload -def join(sep: U_co, seq: U_co) -> NDArray[np.str_]: ... -@overload -def join(sep: S_co, seq: S_co) -> NDArray[np.bytes_]: ... -@overload -def join(sep: _StringDTypeSupportsArray, seq: _StringDTypeSupportsArray) -> _StringDTypeArray: ... -@overload -def join(sep: T_co, seq: T_co) -> _StringDTypeOrUnicodeArray: ... - @overload def partition(a: U_co, sep: U_co) -> NDArray[np.str_]: ... @overload @@ -456,23 +478,23 @@ def rpartition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... def translate( a: U_co, table: str, - deletechars: None | str = ..., + deletechars: str | None = None, ) -> NDArray[np.str_]: ... @overload def translate( a: S_co, table: str, - deletechars: None | str = ..., + deletechars: str | None = None, ) -> NDArray[np.bytes_]: ... 
@overload def translate( a: _StringDTypeSupportsArray, table: str, - deletechars: None | str = ..., + deletechars: str | None = None, ) -> _StringDTypeArray: ... @overload def translate( a: T_co, table: str, - deletechars: None | str = ..., + deletechars: str | None = None, ) -> _StringDTypeOrUnicodeArray: ... diff --git a/numpy/_core/tests/examples/cython/checks.pyx b/numpy/_core/tests/examples/cython/checks.pyx index c0bb1f3f5370..028dc6a6c9e4 100644 --- a/numpy/_core/tests/examples/cython/checks.pyx +++ b/numpy/_core/tests/examples/cython/checks.pyx @@ -242,6 +242,15 @@ def npyiter_has_multi_index(it: "nditer"): return result +def test_get_multi_index_iter_next(it: "nditer", cnp.ndarray[cnp.float64_t, ndim=2] arr): + cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it) + cdef cnp.NpyIter_GetMultiIndexFunc get_multi_index = \ + cnp.NpyIter_GetGetMultiIndex(cit, NULL) + cdef cnp.NpyIter_IterNextFunc iternext = \ + cnp.NpyIter_GetIterNext(cit, NULL) + return 1 + + def npyiter_has_finished(it: "nditer"): cdef cnp.NpyIter* cit try: @@ -266,3 +275,9 @@ def inc2_cfloat_struct(cnp.ndarray[cnp.cfloat_t] arr): # This works in both modes arr[1].real = arr[1].real + 1 arr[1].imag = arr[1].imag + 1 + + +def check_npy_uintp_type_enum(): + # Regression test for gh-27890: cnp.NPY_UINTP was not defined. + # Cython would fail to compile this before gh-27890 was fixed. 
+ return cnp.NPY_UINTP > 0 diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py index c7ceb92650c9..55c5005149c1 100644 --- a/numpy/_core/tests/test_array_coercion.py +++ b/numpy/_core/tests/test_array_coercion.py @@ -14,7 +14,8 @@ from numpy._core._rational_tests import rational from numpy.testing import ( - assert_array_equal, assert_warns, IS_PYPY) + assert_array_equal, assert_warns, IS_PYPY, IS_64BIT +) def arraylikes(): @@ -716,8 +717,7 @@ def __array__(self, dtype=None, copy=None): arr = np.array([ArrayLike]) assert arr[0] is ArrayLike - @pytest.mark.skipif( - np.dtype(np.intp).itemsize < 8, reason="Needs 64bit platform") + @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") def test_too_large_array_error_paths(self): """Test the error paths, including for memory leaks""" arr = np.array(0, dtype="uint8") diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py index 956f9630a0c5..570a2b893b06 100644 --- a/numpy/_core/tests/test_cpu_features.py +++ b/numpy/_core/tests/test_cpu_features.py @@ -401,12 +401,15 @@ class Test_ARM_Features(AbstractTest): def load_flags(self): self.load_flags_cpuinfo("Features") arch = self.get_cpuinfo_item("CPU architecture") - # in case of mounting virtual filesystem of aarch64 kernel - is_rootfs_v8 = int('0'+next(iter(arch))) > 7 if arch else 0 - if re.match("^(aarch64|AARCH64)", machine) or is_rootfs_v8: - self.features_map = dict( - NEON="ASIMD", HALF="ASIMD", VFPV4="ASIMD" - ) + # in case of mounting virtual filesystem of aarch64 kernel without linux32 + is_rootfs_v8 = ( + not re.match("^armv[0-9]+l$", machine) and + (int('0' + next(iter(arch))) > 7 if arch else 0) + ) + if re.match("^(aarch64|AARCH64)", machine) or is_rootfs_v8: + self.features_map = { + "NEON": "ASIMD", "HALF": "ASIMD", "VFPV4": "ASIMD" + } else: self.features_map = dict( # ELF auxiliary vector and /proc/cpuinfo on Linux kernel(armv8 aarch32) diff --git 
a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index fce00a4927fc..ac29a2f7407b 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -267,6 +267,7 @@ def test_npyiter_api(install_temp): assert checks.get_npyiter_size(it) == it.itersize == np.prod(arr.shape) assert checks.npyiter_has_multi_index(it) == it.has_multi_index == True assert checks.get_npyiter_ndim(it) == it.ndim == 2 + assert checks.test_get_multi_index_iter_next(it, arr) arr2 = np.random.rand(2, 1, 2) it = np.nditer([arr, arr2]) @@ -295,3 +296,9 @@ def test_complex(install_temp): arr = np.array([0, 10+10j], dtype="F") inc2_cfloat_struct(arr) assert arr[1] == (12 + 12j) + + +def test_npy_uintp_type_enum(): + import checks + assert checks.check_npy_uintp_type_enum() + diff --git a/numpy/_core/tests/test_dlpack.py b/numpy/_core/tests/test_dlpack.py index d9205912124e..41dd72429580 100644 --- a/numpy/_core/tests/test_dlpack.py +++ b/numpy/_core/tests/test_dlpack.py @@ -144,6 +144,17 @@ def test_readonly(self): y = np.from_dlpack(x) assert not y.flags.writeable + def test_writeable(self): + x_new, x_old = new_and_old_dlpack() + + # new dlpacks respect writeability + y = np.from_dlpack(x_new) + assert y.flags.writeable + + # old dlpacks are not writeable for backwards compatibility + y = np.from_dlpack(x_old) + assert not y.flags.writeable + def test_ndim0(self): x = np.array(1.0) y = np.from_dlpack(x) diff --git a/numpy/_core/tests/test_function_base.py b/numpy/_core/tests/test_function_base.py index 4f735b7ce359..b879f12ae8ea 100644 --- a/numpy/_core/tests/test_function_base.py +++ b/numpy/_core/tests/test_function_base.py @@ -1,5 +1,5 @@ import sys - +import platform import pytest import numpy as np @@ -14,6 +14,9 @@ IS_PYPY ) +def _is_armhf(): + # Check if the current platform is ARMHF (32-bit ARM architecture) + return platform.machine().startswith('arm') and platform.architecture()[0] == '32bit' class PhysicalQuantity(float): def __new__(cls, 
value): @@ -415,6 +418,9 @@ def __mul__(self, other): assert_equal(linspace(one, five), linspace(1, 5)) + # even when not explicitly enabled via FPSCR register + @pytest.mark.xfail(_is_armhf(), + reason="ARMHF/AArch32 platforms seem to FTZ subnormals") def test_denormal_numbers(self): # Regression test for gh-5437. Will probably fail when compiled # with ICC, which flushes denormals to zero diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 02ed3ece94b5..87508732d85c 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -30,7 +30,7 @@ assert_array_equal, assert_raises_regex, assert_array_almost_equal, assert_allclose, IS_PYPY, IS_WASM, IS_PYSTON, HAS_REFCOUNT, assert_array_less, runstring, temppath, suppress_warnings, break_cycles, - check_support_sve, assert_array_compare, + check_support_sve, assert_array_compare, IS_64BIT ) from numpy.testing._private.utils import requires_memory, _no_tracing from numpy._core.tests._locales import CommaDecimalPointLocale @@ -983,7 +983,7 @@ def test_too_big_error(self): assert_raises(ValueError, np.zeros, shape, dtype=np.int8) assert_raises(ValueError, np.ones, shape, dtype=np.int8) - @pytest.mark.skipif(np.dtype(np.intp).itemsize != 8, + @pytest.mark.skipif(not IS_64BIT, reason="malloc may not fail on 32 bit systems") def test_malloc_fails(self): # This test is guaranteed to fail due to a too large allocation @@ -5374,6 +5374,13 @@ def test_object(self): # gh-6312 u, v = np.array(u, dtype='object'), np.array(v, dtype='object') assert_array_equal(idx, np.lexsort((u, v))) + def test_strings(self): # gh-27984 + for dtype in "TU": + surnames = np.array(['Hertz', 'Galilei', 'Hertz'], dtype=dtype) + first_names = np.array(['Heinrich', 'Galileo', 'Gustav'], dtype=dtype) + assert_array_equal(np.lexsort((first_names, surnames)), [1, 2, 0]) + + def test_invalid_axis(self): # gh-7528 x = np.linspace(0., 1., 42*3).reshape(42, 3) assert_raises(AxisError, 
np.lexsort, x, axis=2) @@ -10351,3 +10358,24 @@ def test_to_device(self): r"The stream argument in to_device\(\) is not supported" ): arr.to_device("cpu", stream=1) + +def test_array_interface_excess_dimensions_raises(): + """Regression test for gh-27949: ensure too many dims raises ValueError instead of segfault.""" + + # Dummy object to hold a custom __array_interface__ + class DummyArray: + def __init__(self, interface): + # Attach the array interface dict to mimic an array + self.__array_interface__ = interface + + # Create a base array (scalar) and copy its interface + base = np.array(42) # base can be any scalar or array + interface = dict(base.__array_interface__) + + # Modify the shape to exceed NumPy's dimension limit (NPY_MAXDIMS, typically 64) + interface['shape'] = tuple([1] * 136) # match the original bug report + + dummy = DummyArray(interface) + # Now, using np.asanyarray on this dummy should trigger a ValueError (not segfault) + with pytest.raises(ValueError, match="dimensions must be within"): + np.asanyarray(dummy) \ No newline at end of file diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 754688501c2d..0f7e01aef033 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -1,10 +1,13 @@ +import concurrent.futures import threading +import string import numpy as np import pytest -from numpy.testing import IS_WASM +from numpy.testing import IS_WASM, IS_64BIT from numpy.testing._private.utils import run_threaded +from numpy._core import _rational_tests if IS_WASM: pytest.skip(allow_module_level=True, reason="no threading support in wasm") @@ -18,6 +21,7 @@ def func(seed): run_threaded(func, 500, pass_count=True) + def test_parallel_ufunc_execution(): # if the loop data cache or dispatch cache are not thread-safe # computing ufuncs simultaneously in multiple threads leads @@ -31,18 +35,14 @@ def func(): # see gh-26690 NUM_THREADS = 50 - b = 
threading.Barrier(NUM_THREADS) - a = np.ones(1000) - def f(): + def f(b): b.wait() return a.sum() - threads = [threading.Thread(target=f) for _ in range(NUM_THREADS)] + run_threaded(f, NUM_THREADS, pass_barrier=True) - [t.start() for t in threads] - [t.join() for t in threads] def test_temp_elision_thread_safety(): amid = np.ones(50000) @@ -120,3 +120,174 @@ def legacy_125(): task1.start() task2.start() + + +def test_parallel_reduction(): + # gh-28041 + NUM_THREADS = 50 + + x = np.arange(1000) + + def closure(b): + b.wait() + np.sum(x) + + run_threaded(closure, NUM_THREADS, pass_barrier=True) + + +def test_parallel_flat_iterator(): + # gh-28042 + x = np.arange(20).reshape(5, 4).T + + def closure(b): + b.wait() + for _ in range(100): + list(x.flat) + + run_threaded(closure, outer_iterations=100, pass_barrier=True) + + # gh-28143 + def prepare_args(): + return [np.arange(10)] + + def closure(x, b): + b.wait() + for _ in range(100): + y = np.arange(10) + y.flat[x] = x + + run_threaded(closure, pass_barrier=True, prepare_args=prepare_args) + + +def test_multithreaded_repeat(): + x0 = np.arange(10) + + def closure(b): + b.wait() + for _ in range(100): + x = np.repeat(x0, 2, axis=0)[::2] + + run_threaded(closure, max_workers=10, pass_barrier=True) + + +def test_structured_advanced_indexing(): + # Test that copyswap(n) used by integer array indexing is threadsafe + # for structured datatypes, see gh-15387. This test can behave randomly. 
+ + # Create a deeply nested dtype to make a failure more likely: + dt = np.dtype([("", "f8")]) + dt = np.dtype([("", dt)] * 2) + dt = np.dtype([("", dt)] * 2) + # The array should be large enough to likely run into threading issues + arr = np.random.uniform(size=(6000, 8)).view(dt)[:, 0] + + rng = np.random.default_rng() + + def func(arr): + indx = rng.integers(0, len(arr), size=6000, dtype=np.intp) + arr[indx] + + tpe = concurrent.futures.ThreadPoolExecutor(max_workers=8) + futures = [tpe.submit(func, arr) for _ in range(10)] + for f in futures: + f.result() + + assert arr.dtype is dt + + +def test_structured_threadsafety2(): + # Nonzero (and some other functions) should be threadsafe for + # structured datatypes, see gh-15387. This test can behave randomly. + from concurrent.futures import ThreadPoolExecutor + + # Create a deeply nested dtype to make a failure more likely: + dt = np.dtype([("", "f8")]) + dt = np.dtype([("", dt)]) + dt = np.dtype([("", dt)] * 2) + # The array should be large enough to likely run into threading issues + arr = np.random.uniform(size=(5000, 4)).view(dt)[:, 0] + + def func(arr): + arr.nonzero() + + tpe = ThreadPoolExecutor(max_workers=8) + futures = [tpe.submit(func, arr) for _ in range(10)] + for f in futures: + f.result() + + assert arr.dtype is dt + + +def test_stringdtype_multithreaded_access_and_mutation( + dtype, random_string_list): + # this test uses an RNG and may crash or cause deadlocks if there is a + # threading bug + rng = np.random.default_rng(0x4D3D3D3) + + chars = list(string.ascii_letters + string.digits) + chars = np.array(chars, dtype="U1") + ret = rng.choice(chars, size=100 * 10, replace=True) + random_string_list = ret.view("U100") + + def func(arr): + rnd = rng.random() + # either write to random locations in the array, compute a ufunc, or + # re-initialize the array + if rnd < 0.25: + num = np.random.randint(0, arr.size) + arr[num] = arr[num] + "hello" + elif rnd < 0.5: + if rnd < 0.375: + np.add(arr, arr) + 
else: + np.add(arr, arr, out=arr) + elif rnd < 0.75: + if rnd < 0.875: + np.multiply(arr, np.int64(2)) + else: + np.multiply(arr, np.int64(2), out=arr) + else: + arr[:] = random_string_list + + with concurrent.futures.ThreadPoolExecutor(max_workers=8) as tpe: + arr = np.array(random_string_list, dtype=dtype) + futures = [tpe.submit(func, arr) for _ in range(500)] + + for f in futures: + f.result() + + +@pytest.mark.skipif( + not IS_64BIT, + reason="Sometimes causes failures or crashes due to OOM on 32 bit runners" +) +def test_legacy_usertype_cast_init_thread_safety(): + def closure(b): + b.wait() + np.full((10, 10), 1, _rational_tests.rational) + + run_threaded(closure, 250, pass_barrier=True) + +@pytest.mark.parametrize("dtype", [bool, int, float]) +def test_nonzero(dtype): + # See: gh-28361 + # + # np.nonzero uses np.count_nonzero to determine the size of the output array + # In a second pass the indices of the non-zero elements are determined, but they can have changed + # + # This test triggers a data race which is suppressed in the TSAN CI. 
The test is to ensure + # np.nonzero does not generate a segmentation fault + x = np.random.randint(4, size=10_000).astype(dtype) + + def func(index): + for _ in range(10): + if index == 0: + x[::2] = np.random.randint(2) + else: + try: + _ = np.nonzero(x) + except RuntimeError as ex: + assert 'number of non-zero array elements changed during function execution' in str(ex) + + run_threaded(func, max_workers=10, pass_count=True, outer_iterations=50) + diff --git a/numpy/_core/tests/test_nep50_promotions.py b/numpy/_core/tests/test_nep50_promotions.py index 688be5338437..9eec02239e34 100644 --- a/numpy/_core/tests/test_nep50_promotions.py +++ b/numpy/_core/tests/test_nep50_promotions.py @@ -237,6 +237,20 @@ def test_integer_comparison(sctype, other_val, comp): assert_array_equal(comp(other_val, val_obj), comp(other_val, val)) +@pytest.mark.parametrize("arr", [ + np.ones((100, 100), dtype=np.uint8)[::2], # not trivially iterable + np.ones(20000, dtype=">u4"), # cast and >buffersize + np.ones(100, dtype=">u4"), # fast path compatible with cast +]) +def test_integer_comparison_with_cast(arr): + # Similar to above, but mainly test a few cases that cover the slow path + # the test is limited to unsigned ints and -1 for simplicity. 
+ res = arr >= -1 + assert_array_equal(res, np.ones_like(arr, dtype=bool)) + res = arr < -1 + assert_array_equal(res, np.zeros_like(arr, dtype=bool)) + + @pytest.mark.parametrize("comp", [np.equal, np.not_equal, np.less_equal, np.less, np.greater_equal, np.greater]) diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index c4a0a55227a0..851ce324d76c 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -14,7 +14,8 @@ assert_, assert_equal, IS_PYPY, assert_almost_equal, assert_array_equal, assert_array_almost_equal, assert_raises, assert_raises_regex, assert_warns, suppress_warnings, - _assert_valid_refcount, HAS_REFCOUNT, IS_PYSTON, IS_WASM + _assert_valid_refcount, HAS_REFCOUNT, IS_PYSTON, IS_WASM, + IS_64BIT, ) from numpy.testing._private.utils import _no_tracing, requires_memory from numpy._utils import asbytes, asunicode @@ -2265,7 +2266,7 @@ def test_void_compare_segfault(self): def test_reshape_size_overflow(self): # gh-7455 a = np.ones(20)[::2] - if np.dtype(np.intp).itemsize == 8: + if IS_64BIT: # 64 bit. The following are the prime factors of 2**63 + 5, # plus a leading 2, so when multiplied together as int64, # the result overflows to a total size of 10. 
@@ -2654,3 +2655,9 @@ def test_sort_overlap(self): inp = np.linspace(0, size, num=size, dtype=np.intc) out = np.sort(inp) assert_equal(inp, out) + + def test_searchsorted_structured(self): + # gh-28190 + x = np.array([(0, 1.)], dtype=[('time', ' 0: + size = size // np.dtype(f"{str_dt}1").itemsize + 1 + + with pytest.raises(ValueError): + np.dtype((str_dt, size)) + with pytest.raises(TypeError): + np.dtype(f"{str_dt}{size}") + + +@pytest.mark.parametrize("str_dt", "US") +def test_string_size_dtype_large_repr(str_dt): + size = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize + size_str = str(size) + + dtype = np.dtype((str_dt, size)) + assert size_str in dtype.str + assert size_str in str(dtype) + assert size_str in repr(dtype) + + +@pytest.mark.slow +@requires_memory(2 * np.iinfo(np.intc).max) +@pytest.mark.parametrize("str_dt", "US") +def test_large_string_coercion_error(str_dt): + very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize + try: + large_string = "A" * (very_large + 1) + except Exception: + # We may not be able to create this Python string on 32bit. + pytest.skip("python failed to create huge string") + + class MyStr: + def __str__(self): + return large_string + + try: + # TypeError from NumPy, or OverflowError from 32bit Python. + with pytest.raises((TypeError, OverflowError)): + np.array([large_string], dtype=str_dt) + + # Same as above, but input has to be converted to a string. + with pytest.raises((TypeError, OverflowError)): + np.array([MyStr()], dtype=str_dt) + except MemoryError: + # Catch memory errors, because `requires_memory` would do so. 
+ raise AssertionError("Ops should raise before any large allocation.") + +@pytest.mark.slow +@requires_memory(2 * np.iinfo(np.intc).max) +@pytest.mark.parametrize("str_dt", "US") +def test_large_string_addition_error(str_dt): + very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize + + a = np.array(["A" * very_large], dtype=str_dt) + b = np.array("B", dtype=str_dt) + try: + with pytest.raises(TypeError): + np.add(a, b) + with pytest.raises(TypeError): + np.add(a, a) + except MemoryError: + # Catch memory errors, because `requires_memory` would do so. + raise AssertionError("Ops should raise before any large allocation.") + + +def test_large_string_cast(): + very_large = np.iinfo(np.intc).max // 4 + # Could be nice to test very large path, but it makes too many huge + # allocations right now (need non-legacy cast loops for this). + # a = np.array([], dtype=np.dtype(("S", very_large))) + # assert a.astype("U").dtype.itemsize == very_large * 4 + + a = np.array([], dtype=np.dtype(("S", very_large + 1))) + # It is not perfect but OK if this raises a MemoryError during setup + # (this happens due clunky code and/or buffer setup.) 
+ with pytest.raises((TypeError, MemoryError)): + a.astype("U") + + @pytest.mark.parametrize("dt", ["S", "U", "T"]) class TestMethods: @@ -299,6 +381,8 @@ def test_str_len(self, in_, out, dt): None, [3, -1]), ("Ae¢☃€ 😊" * 2, "😊", 0, None, 6), ("Ae¢☃€ 😊" * 2, "😊", 7, None, 13), + pytest.param("A" * (2 ** 17), r"[\w]+\Z", 0, None, -1, + id=r"A*2**17-[\w]+\Z-0-None--1"), ]) def test_find(self, a, sub, start, end, out, dt): if "😊" in a and dt == "S": diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 43037f20e2f6..7ca2f21df363 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -824,20 +824,77 @@ def test_vecdot(self): actual3 = np.vecdot(arr1.astype("object"), arr2) assert_array_equal(actual3, expected.astype("object")) - def test_vecdot_complex(self): - arr1 = np.array([1, 2j, 3]) - arr2 = np.array([1, 2, 3]) + def test_matvec(self): + arr1 = np.arange(6).reshape((2, 3)) + arr2 = np.arange(3).reshape((1, 3)) + + actual = np.matvec(arr1, arr2) + expected = np.array([[5, 14]]) - actual = np.vecdot(arr1, arr2) - expected = np.array([10-4j]) assert_array_equal(actual, expected) - actual2 = np.vecdot(arr2, arr1) - assert_array_equal(actual2, expected.conj()) + actual2 = np.matvec(arr1.T, arr2.T, axes=[(-1, -2), -2, -1]) + assert_array_equal(actual2, expected) - actual3 = np.vecdot(arr1.astype("object"), arr2.astype("object")) + actual3 = np.matvec(arr1.astype("object"), arr2) assert_array_equal(actual3, expected.astype("object")) + @pytest.mark.parametrize("vec", [ + np.array([[1., 2., 3.], [4., 5., 6.]]), + np.array([[1., 2j, 3.], [4., 5., 6j]]), + np.array([[1., 2., 3.], [4., 5., 6.]], dtype=object), + np.array([[1., 2j, 3.], [4., 5., 6j]], dtype=object)]) + @pytest.mark.parametrize("matrix", [ + None, + np.array([[1.+1j, 0.5, -0.5j], + [0.25, 2j, 0.], + [4., 0., -1j]])]) + def test_vecmatvec_identity(self, matrix, vec): + """Check that (x†A)x equals x†(Ax).""" + mat = matrix if matrix is not None else 
np.eye(3) + matvec = np.matvec(mat, vec) # Ax + vecmat = np.vecmat(vec, mat) # x†A + if matrix is None: + assert_array_equal(matvec, vec) + assert_array_equal(vecmat.conj(), vec) + assert_array_equal(matvec, (mat @ vec[..., np.newaxis]).squeeze(-1)) + assert_array_equal(vecmat, (vec[..., np.newaxis].mT.conj() + @ mat).squeeze(-2)) + expected = np.einsum('...i,ij,...j', vec.conj(), mat, vec) + vec_matvec = (vec.conj() * matvec).sum(-1) + vecmat_vec = (vecmat * vec).sum(-1) + assert_array_equal(vec_matvec, expected) + assert_array_equal(vecmat_vec, expected) + + @pytest.mark.parametrize("ufunc, shape1, shape2, conj", [ + (np.vecdot, (3,), (3,), True), + (np.vecmat, (3,), (3, 1), True), + (np.matvec, (1, 3), (3,), False), + (np.matmul, (1, 3), (3, 1), False), + ]) + def test_vecdot_matvec_vecmat_complex(self, ufunc, shape1, shape2, conj): + arr1 = np.array([1, 2j, 3]) + arr2 = np.array([1, 2, 3]) + + actual1 = ufunc(arr1.reshape(shape1), arr2.reshape(shape2)) + expected1 = np.array(((arr1.conj() if conj else arr1) * arr2).sum(), + ndmin=min(len(shape1), len(shape2))) + assert_array_equal(actual1, expected1) + # This would fail for conj=True, since matmul omits the conjugate. 
+ if not conj: + assert_array_equal(arr1.reshape(shape1) @ arr2.reshape(shape2), + expected1) + + actual2 = ufunc(arr2.reshape(shape1), arr1.reshape(shape2)) + expected2 = np.array(((arr2.conj() if conj else arr2) * arr1).sum(), + ndmin=min(len(shape1), len(shape2))) + assert_array_equal(actual2, expected2) + + actual3 = ufunc(arr1.reshape(shape1).astype("object"), + arr2.reshape(shape2).astype("object")) + expected3 = expected1.astype(object) + assert_array_equal(actual3, expected3) + def test_vecdot_subclass(self): class MySubclass(np.ndarray): pass @@ -1644,51 +1701,46 @@ def test_where_with_broadcasting(self): assert_array_equal((a[where] < b_where), out[where].astype(bool)) assert not out[~where].any() # outside mask, out remains all 0 - def check_identityless_reduction(self, a): - # np.minimum.reduce is an identityless reduction + @staticmethod + def identityless_reduce_arrs(): + yield np.empty((2, 3, 4), order='C') + yield np.empty((2, 3, 4), order='F') + # Mixed order (reduce order differs outer) + yield np.empty((2, 4, 3), order='C').swapaxes(1, 2) + # Reversed order + yield np.empty((2, 3, 4), order='C')[::-1, ::-1, ::-1] + # Not contiguous + yield np.empty((3, 5, 4), order='C').swapaxes(1, 2)[1:, 1:, 1:] + # Not contiguous and not aligned + a = np.empty((3*4*5*8 + 1,), dtype='i1') + a = a[1:].view(dtype='f8') + a.shape = (3, 4, 5) + a = a[1:, 1:, 1:] + yield a - # Verify that it sees the zero at various positions + @pytest.mark.parametrize("a", identityless_reduce_arrs()) + @pytest.mark.parametrize("pos", [(1, 0, 0), (0, 1, 0), (0, 0, 1)]) + def test_identityless_reduction(self, a, pos): + # np.minimum.reduce is an identityless reduction a[...] 
= 1 - a[1, 0, 0] = 0 - assert_equal(np.minimum.reduce(a, axis=None), 0) - assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(1, 2)), [1, 0]) - assert_equal(np.minimum.reduce(a, axis=0), - [[0, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=1), - [[1, 1, 1, 1], [0, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=2), - [[1, 1, 1], [0, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=()), a) + a[pos] = 0 - a[...] = 1 - a[0, 1, 0] = 0 - assert_equal(np.minimum.reduce(a, axis=None), 0) - assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(0, 2)), [1, 0, 1]) - assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1]) - assert_equal(np.minimum.reduce(a, axis=0), - [[1, 1, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=1), - [[0, 1, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=2), - [[1, 0, 1], [1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=()), a) + for axis in [None, (0, 1), (0, 2), (1, 2), 0, 1, 2, ()]: + if axis is None: + axes = np.array([], dtype=np.intp) + else: + axes = np.delete(np.arange(a.ndim), axis) - a[...] 
= 1 - a[0, 0, 1] = 0 - assert_equal(np.minimum.reduce(a, axis=None), 0) - assert_equal(np.minimum.reduce(a, axis=(0, 1)), [1, 0, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1]) - assert_equal(np.minimum.reduce(a, axis=0), - [[1, 0, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=1), - [[1, 0, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=2), - [[0, 1, 1], [1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=()), a) + expected_pos = tuple(np.array(pos)[axes]) + expected = np.ones(np.array(a.shape)[axes]) + expected[expected_pos] = 0 + + res = np.minimum.reduce(a, axis=axis) + assert_equal(res, expected, strict=True) + + res = np.full_like(res, np.nan) + np.minimum.reduce(a, axis=axis, out=res) + assert_equal(res, expected, strict=True) @requires_memory(6 * 1024**3) @pytest.mark.skipif(sys.maxsize < 2**32, @@ -1703,30 +1755,6 @@ def test_identityless_reduction_huge_array(self): assert res[0] == 3 assert res[-1] == 4 - def test_identityless_reduction_corder(self): - a = np.empty((2, 3, 4), order='C') - self.check_identityless_reduction(a) - - def test_identityless_reduction_forder(self): - a = np.empty((2, 3, 4), order='F') - self.check_identityless_reduction(a) - - def test_identityless_reduction_otherorder(self): - a = np.empty((2, 4, 3), order='C').swapaxes(1, 2) - self.check_identityless_reduction(a) - - def test_identityless_reduction_noncontig(self): - a = np.empty((3, 5, 4), order='C').swapaxes(1, 2) - a = a[1:, 1:, 1:] - self.check_identityless_reduction(a) - - def test_identityless_reduction_noncontig_unaligned(self): - a = np.empty((3*4*5*8 + 1,), dtype='i1') - a = a[1:].view(dtype='f8') - a.shape = (3, 4, 5) - a = a[1:, 1:, 1:] - self.check_identityless_reduction(a) - def test_reduce_identity_depends_on_loop(self): """ The type of the result should always depend on the selected loop, not @@ -2757,9 +2785,9 @@ def 
test_ufunc_noncontiguous(ufunc): # bool, object, datetime are too irregular for this simple test continue inp, out = typ.split('->') - args_c = [np.empty(6, t) for t in inp] - # non contiguous (3 step) - args_n = [np.empty(18, t)[::3] for t in inp] + args_c = [np.empty((6, 6), t) for t in inp] + # non contiguous (2, 3 step on the two dimensions) + args_n = [np.empty((12, 18), t)[::2, ::3] for t in inp] # alignment != itemsize is possible. So create an array with such # an odd step manually. args_o = [] @@ -2767,10 +2795,9 @@ def test_ufunc_noncontiguous(ufunc): orig_dt = np.dtype(t) off_dt = f"S{orig_dt.alignment}" # offset by alignment dtype = np.dtype([("_", off_dt), ("t", orig_dt)], align=False) - args_o.append(np.empty(6, dtype=dtype)["t"]) - + args_o.append(np.empty((6, 6), dtype=dtype)["t"]) for a in args_c + args_n + args_o: - a.flat = range(1,7) + a.flat = range(1, 37) with warnings.catch_warnings(record=True): warnings.filterwarnings("always") @@ -2788,7 +2815,7 @@ def test_ufunc_noncontiguous(ufunc): # since different algorithms (libm vs. 
intrinsics) can be used # for different input strides res_eps = np.finfo(dt).eps - tol = 2*res_eps + tol = 3*res_eps assert_allclose(res_c, res_n, atol=tol, rtol=tol) assert_allclose(res_c, res_o, atol=tol, rtol=tol) else: diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index cef0348c2dac..4d56c785d5a7 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -4019,7 +4019,9 @@ def test_array_ufunc_direct_call(self): def test_ufunc_docstring(self): original_doc = np.add.__doc__ new_doc = "new docs" - expected_dict = {} if IS_PYPY else {"__module__": "numpy"} + expected_dict = ( + {} if IS_PYPY else {"__module__": "numpy", "__qualname__": "add"} + ) np.add.__doc__ = new_doc assert np.add.__doc__ == new_doc diff --git a/numpy/_core/umath.py b/numpy/_core/umath.py index 8e51cd1694af..10278e52cbec 100644 --- a/numpy/_core/umath.py +++ b/numpy/_core/umath.py @@ -33,8 +33,8 @@ 'heaviside', 'hypot', 'invert', 'isfinite', 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', 'left_shift', 'less', 'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', - 'logical_or', 'logical_xor', 'maximum', 'minimum', 'mod', 'modf', + 'logical_or', 'logical_xor', 'matvec', 'maximum', 'minimum', 'mod', 'modf', 'multiply', 'negative', 'nextafter', 'not_equal', 'pi', 'positive', 'power', 'rad2deg', 'radians', 'reciprocal', 'remainder', 'right_shift', 'rint', 'sign', 'signbit', 'sin', 'sinh', 'spacing', 'sqrt', 'square', - 'subtract', 'tan', 'tanh', 'true_divide', 'trunc'] + 'subtract', 'tan', 'tanh', 'true_divide', 'trunc', 'vecdot', 'vecmat'] diff --git a/numpy/_core/umath.pyi b/numpy/_core/umath.pyi new file mode 100644 index 000000000000..d9f0d384cf6d --- /dev/null +++ b/numpy/_core/umath.pyi @@ -0,0 +1,197 @@ +from numpy import ( + absolute, + add, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + bitwise_and, + bitwise_count, + bitwise_or, + bitwise_xor, + cbrt, + 
ceil, + conj, + conjugate, + copysign, + cos, + cosh, + deg2rad, + degrees, + divide, + divmod, + e, + equal, + euler_gamma, + exp, + exp2, + expm1, + fabs, + float_power, + floor, + floor_divide, + fmax, + fmin, + fmod, + frexp, + frompyfunc, + gcd, + greater, + greater_equal, + heaviside, + hypot, + invert, + isfinite, + isinf, + isnan, + isnat, + lcm, + ldexp, + left_shift, + less, + less_equal, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + matvec, + maximum, + minimum, + mod, + modf, + multiply, + negative, + nextafter, + not_equal, + pi, + positive, + power, + rad2deg, + radians, + reciprocal, + remainder, + right_shift, + rint, + sign, + signbit, + sin, + sinh, + spacing, + sqrt, + square, + subtract, + tan, + tanh, + true_divide, + trunc, + vecdot, + vecmat, +) + +__all__ = [ + "absolute", + "add", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "bitwise_and", + "bitwise_count", + "bitwise_or", + "bitwise_xor", + "cbrt", + "ceil", + "conj", + "conjugate", + "copysign", + "cos", + "cosh", + "deg2rad", + "degrees", + "divide", + "divmod", + "e", + "equal", + "euler_gamma", + "exp", + "exp2", + "expm1", + "fabs", + "float_power", + "floor", + "floor_divide", + "fmax", + "fmin", + "fmod", + "frexp", + "frompyfunc", + "gcd", + "greater", + "greater_equal", + "heaviside", + "hypot", + "invert", + "isfinite", + "isinf", + "isnan", + "isnat", + "lcm", + "ldexp", + "left_shift", + "less", + "less_equal", + "log", + "log1p", + "log2", + "log10", + "logaddexp", + "logaddexp2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "matvec", + "maximum", + "minimum", + "mod", + "modf", + "multiply", + "negative", + "nextafter", + "not_equal", + "pi", + "positive", + "power", + "rad2deg", + "radians", + "reciprocal", + "remainder", + "right_shift", + "rint", + "sign", + "signbit", + "sin", + "sinh", + "spacing", + "sqrt", + "square", + 
"subtract", + "tan", + "tanh", + "true_divide", + "trunc", + "vecdot", + "vecmat", +] diff --git a/numpy/_distributor_init.pyi b/numpy/_distributor_init.pyi new file mode 100644 index 000000000000..94456aba2bcf --- /dev/null +++ b/numpy/_distributor_init.pyi @@ -0,0 +1 @@ +# intentionally left blank diff --git a/numpy/_expired_attrs_2_0.pyi b/numpy/_expired_attrs_2_0.pyi new file mode 100644 index 000000000000..05c630c9b767 --- /dev/null +++ b/numpy/_expired_attrs_2_0.pyi @@ -0,0 +1,63 @@ +from typing import Final, TypedDict, final, type_check_only + +@final +@type_check_only +class _ExpiredAttributesType(TypedDict): + geterrobj: str + seterrobj: str + cast: str + source: str + lookfor: str + who: str + fastCopyAndTranspose: str + set_numeric_ops: str + NINF: str + PINF: str + NZERO: str + PZERO: str + add_newdoc: str + add_docstring: str + add_newdoc_ufunc: str + compat: str + safe_eval: str + float_: str + complex_: str + longfloat: str + singlecomplex: str + cfloat: str + longcomplex: str + clongfloat: str + string_: str + unicode_: str + Inf: str + Infinity: str + NaN: str + infty: str + issctype: str + maximum_sctype: str + obj2sctype: str + sctype2char: str + sctypes: str + issubsctype: str + set_string_function: str + asfarray: str + issubclass_: str + tracemalloc_domain: str + mat: str + recfromcsv: str + recfromtxt: str + deprecate: str + deprecate_with_doc: str + disp: str + find_common_type: str + round_: str + get_array_wrap: str + DataSource: str + nbytes: str + byte_bounds: str + compare_chararrays: str + format_parser: str + alltrue: str + sometrue: str + +__expired_attributes__: Final[_ExpiredAttributesType] = ... 
diff --git a/numpy/_globals.pyi b/numpy/_globals.pyi new file mode 100644 index 000000000000..b2231a9636b0 --- /dev/null +++ b/numpy/_globals.pyi @@ -0,0 +1,17 @@ +__all__ = ["_CopyMode", "_NoValue"] + +import enum +from typing import Final, final + +@final +class _CopyMode(enum.Enum): + ALWAYS = True + NEVER = False + IF_NEEDED = 2 + + def __bool__(self, /) -> bool: ... + +@final +class _NoValueType: ... + +_NoValue: Final[_NoValueType] = ... diff --git a/numpy/_pyinstaller/__init__.pyi b/numpy/_pyinstaller/__init__.pyi new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/numpy/_pyinstaller/hook-numpy.pyi b/numpy/_pyinstaller/hook-numpy.pyi new file mode 100644 index 000000000000..2642996dad7e --- /dev/null +++ b/numpy/_pyinstaller/hook-numpy.pyi @@ -0,0 +1,13 @@ +from typing import Final + +# from `PyInstaller.compat` +is_conda: Final[bool] +is_pure_conda: Final[bool] + +# from `PyInstaller.utils.hooks` +def is_module_satisfies(requirements: str, version: None = None, version_attr: None = None) -> bool: ... 
+ +binaries: Final[list[tuple[str, str]]] + +hiddenimports: Final[list[str]] +excludedimports: Final[list[str]] diff --git a/numpy/_typing/__init__.py b/numpy/_typing/__init__.py index 687e124ec2bb..dd9b133ddf88 100644 --- a/numpy/_typing/__init__.py +++ b/numpy/_typing/__init__.py @@ -121,15 +121,14 @@ NDArray as NDArray, ArrayLike as ArrayLike, _ArrayLike as _ArrayLike, - _FiniteNestedSequence as _FiniteNestedSequence, - _SupportsArray as _SupportsArray, - _SupportsArrayFunc as _SupportsArrayFunc, _ArrayLikeInt as _ArrayLikeInt, _ArrayLikeBool_co as _ArrayLikeBool_co, _ArrayLikeUInt_co as _ArrayLikeUInt_co, _ArrayLikeInt_co as _ArrayLikeInt_co, _ArrayLikeFloat_co as _ArrayLikeFloat_co, + _ArrayLikeFloat64_co as _ArrayLikeFloat64_co, _ArrayLikeComplex_co as _ArrayLikeComplex_co, + _ArrayLikeComplex128_co as _ArrayLikeComplex128_co, _ArrayLikeNumber_co as _ArrayLikeNumber_co, _ArrayLikeTD64_co as _ArrayLikeTD64_co, _ArrayLikeDT64_co as _ArrayLikeDT64_co, @@ -140,6 +139,9 @@ _ArrayLikeString_co as _ArrayLikeString_co, _ArrayLikeAnyString_co as _ArrayLikeAnyString_co, _ArrayLikeUnknown as _ArrayLikeUnknown, + _FiniteNestedSequence as _FiniteNestedSequence, + _SupportsArray as _SupportsArray, + _SupportsArrayFunc as _SupportsArrayFunc, _UnknownType as _UnknownType, ) diff --git a/numpy/_typing/_array_like.py b/numpy/_typing/_array_like.py index 27b59b75373a..7798e5d5d751 100644 --- a/numpy/_typing/_array_like.py +++ b/numpy/_typing/_array_like.py @@ -21,6 +21,7 @@ str_, bytes_, ) +from ._nbit_base import _32Bit, _64Bit from ._nested_sequence import _NestedSequence from ._shape import _Shape @@ -87,17 +88,16 @@ def __array_function__( ) if sys.version_info >= (3, 12): - from collections.abc import Buffer - - ArrayLike: TypeAlias = Buffer | _DualArrayLike[ - dtype[Any], - bool | int | float | complex | str | bytes, - ] + from collections.abc import Buffer as _Buffer else: - ArrayLike: TypeAlias = _DualArrayLike[ - dtype[Any], - bool | int | float | complex | str | 
bytes, - ] + @runtime_checkable + class _Buffer(Protocol): + def __buffer__(self, flags: int, /) -> memoryview: ... + +ArrayLike: TypeAlias = _Buffer | _DualArrayLike[ + dtype[Any], + bool | int | float | complex | str | bytes, +] # `ArrayLike_co`: array-like objects that can be coerced into `X` # given the casting rules `same_kind` @@ -165,6 +165,11 @@ def __array_function__( _ArrayLikeString_co ) +__Float64_co: TypeAlias = np.floating[_64Bit] | np.float32 | np.float16 | np.integer | np.bool +__Complex128_co: TypeAlias = np.number[_64Bit] | np.number[_32Bit] | np.float16 | np.integer | np.bool +_ArrayLikeFloat64_co: TypeAlias = _DualArrayLike[dtype[__Float64_co], float | int] +_ArrayLikeComplex128_co: TypeAlias = _DualArrayLike[dtype[__Complex128_co], complex | float | int] + # NOTE: This includes `builtins.bool`, but not `numpy.bool`. _ArrayLikeInt: TypeAlias = _DualArrayLike[ dtype[integer[Any]], diff --git a/numpy/_typing/_callable.pyi b/numpy/_typing/_callable.pyi index 56e24fb73911..75af1ae8efba 100644 --- a/numpy/_typing/_callable.pyi +++ b/numpy/_typing/_callable.pyi @@ -151,19 +151,15 @@ class _IntTrueDiv(Protocol[_NBit1]): class _UnsignedIntOp(Protocol[_NBit1]): # NOTE: `uint64 + signedinteger -> float64` @overload - def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... + def __call__(self, other: int, /) -> unsignedinteger[_NBit1]: ... @overload - def __call__(self, other: int | signedinteger[Any], /) -> Any: ... + def __call__(self, other: float, /) -> float64: ... @overload - def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... + def __call__(self, other: complex, /) -> complex128: ... @overload - def __call__( - self, other: complex, / - ) -> complexfloating[_NBit1, _NBit1] | complex128: ... + def __call__(self, other: unsignedinteger[_NBit2], /) -> unsignedinteger[_NBit1] | unsignedinteger[_NBit2]: ... 
@overload - def __call__( - self, other: unsignedinteger[_NBit2], / - ) -> unsignedinteger[_NBit1] | unsignedinteger[_NBit2]: ... + def __call__(self, other: signedinteger, /) -> Any: ... @type_check_only class _UnsignedIntBitOp(Protocol[_NBit1]): @@ -207,19 +203,13 @@ class _UnsignedIntDivMod(Protocol[_NBit1]): @type_check_only class _SignedIntOp(Protocol[_NBit1]): @overload - def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> signedinteger[_NBit1] | int_: ... + def __call__(self, other: int, /) -> signedinteger[_NBit1]: ... @overload - def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... + def __call__(self, other: float, /) -> float64: ... @overload - def __call__( - self, other: complex, / - ) -> complexfloating[_NBit1, _NBit1] | complex128: ... + def __call__(self, other: complex, /) -> complex128: ... @overload - def __call__( - self, other: signedinteger[_NBit2], / - ) -> signedinteger[_NBit1] | signedinteger[_NBit2]: ... + def __call__(self, other: signedinteger[_NBit2], /) -> signedinteger[_NBit1] | signedinteger[_NBit2]: ... @type_check_only class _SignedIntBitOp(Protocol[_NBit1]): @@ -261,9 +251,7 @@ class _SignedIntDivMod(Protocol[_NBit1]): @type_check_only class _FloatOp(Protocol[_NBit1]): @overload - def __call__(self, other: bool, /) -> floating[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> floating[_NBit1] | floating[_NBitInt]: ... + def __call__(self, other: int, /) -> floating[_NBit1]: ... @overload def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... 
@overload diff --git a/numpy/_typing/_char_codes.py b/numpy/_typing/_char_codes.py index a14c01a513ba..56154c7af383 100644 --- a/numpy/_typing/_char_codes.py +++ b/numpy/_typing/_char_codes.py @@ -1,6 +1,10 @@ from typing import Literal -_BoolCodes = Literal["bool", "bool_", "?", "|?", "=?", "?"] +_BoolCodes = Literal[ + "bool", "bool_", + "?", "|?", "=?", "?", + "b1", "|b1", "=b1", "b1", +] # fmt: skip _UInt8Codes = Literal["uint8", "u1", "|u1", "=u1", "u1"] _UInt16Codes = Literal["uint16", "u2", "|u2", "=u2", "u2"] diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index 64c1d4647b7f..b5ac0ff635dd 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -4,32 +4,32 @@ The signatures of the ufuncs are too varied to reasonably type with a single class. So instead, `ufunc` has been expanded into four private subclasses, one for each combination of `~ufunc.nin` and `~ufunc.nout`. - """ from typing import ( Any, Generic, + Literal, NoReturn, - TypedDict, - overload, + Protocol, + SupportsIndex, TypeAlias, + TypedDict, TypeVar, - Literal, - SupportsIndex, - Protocol, + overload, type_check_only, ) + from typing_extensions import LiteralString, Unpack import numpy as np -from numpy import ufunc, _CastingKind, _OrderKACF +from numpy import _CastingKind, _OrderKACF, ufunc from numpy.typing import NDArray -from ._shape import _ShapeLike -from ._scalars import _ScalarLike_co from ._array_like import ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co from ._dtype_like import DTypeLike +from ._scalars import _ScalarLike_co +from ._shape import _ShapeLike _T = TypeVar("_T") _2Tuple: TypeAlias = tuple[_T, _T] @@ -61,6 +61,13 @@ class _SupportsArrayUFunc(Protocol): **kwargs: Any, ) -> Any: ... 
+@type_check_only +class _UFunc3Kwargs(TypedDict, total=False): + where: _ArrayLikeBool_co | None + casting: _CastingKind + order: _OrderKACF + subok: bool + signature: _3Tuple[str | None] | str | None # NOTE: `reduce`, `accumulate`, `reduceat` and `outer` raise a ValueError for # ufuncs that don't accept two input arguments and return one output argument. @@ -72,11 +79,15 @@ class _SupportsArrayUFunc(Protocol): # NOTE: If 2 output types are returned then `out` must be a # 2-tuple of arrays. Otherwise `None` or a plain array are also acceptable +# pyright: reportIncompatibleMethodOverride=false + @type_check_only class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @property + def __qualname__(self) -> _NameType: ... + @property def ntypes(self) -> _NTypes: ... @property def identity(self) -> _IDType: ... @@ -146,6 +157,8 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @property def __name__(self) -> _NameType: ... @property + def __qualname__(self) -> _NameType: ... + @property def ntypes(self) -> _NTypes: ... @property def identity(self) -> _IDType: ... @@ -158,34 +171,61 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @property def signature(self) -> None: ... - @overload + @overload # (scalar, scalar) -> scalar def __call__( self, - __x1: _ScalarLike_co, - __x2: _ScalarLike_co, - out: None = ..., + x1: _ScalarLike_co, + x2: _ScalarLike_co, + /, + out: None = None, *, - where: None | _ArrayLikeBool_co = ..., - casting: _CastingKind = ..., - order: _OrderKACF = ..., - dtype: DTypeLike = ..., - subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], ) -> Any: ... 
- @overload + @overload # (array-like, array) -> array def __call__( self, - __x1: ArrayLike, - __x2: ArrayLike, - out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + x1: ArrayLike, + x2: NDArray[np.generic], + /, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, *, - where: None | _ArrayLikeBool_co = ..., - casting: _CastingKind = ..., - order: _OrderKACF = ..., - dtype: DTypeLike = ..., - subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... + @overload # (array, array-like) -> array + def __call__( + self, + x1: NDArray[np.generic], + x2: ArrayLike, + /, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], ) -> NDArray[Any]: ... + @overload # (array-like, array-like, out=array) -> array + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: NDArray[np.generic] | tuple[NDArray[np.generic]], + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... + @overload # (array-like, array-like) -> array | scalar + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any] | Any: ... def at( self, @@ -223,41 +263,69 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i out: None | NDArray[Any] = ..., ) -> NDArray[Any]: ... 
- # Expand `**kwargs` into explicit keyword-only arguments - @overload + @overload # (scalar, scalar) -> scalar def outer( self, A: _ScalarLike_co, B: _ScalarLike_co, - /, *, - out: None = ..., - where: None | _ArrayLikeBool_co = ..., - casting: _CastingKind = ..., - order: _OrderKACF = ..., - dtype: DTypeLike = ..., - subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., + /, + *, + out: None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], ) -> Any: ... - @overload - def outer( # type: ignore[misc] + @overload # (array-like, array) -> array + def outer( self, A: ArrayLike, + B: NDArray[np.generic], + /, + *, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... + @overload # (array, array-like) -> array + def outer( + self, + A: NDArray[np.generic], B: ArrayLike, - /, *, - out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., - where: None | _ArrayLikeBool_co = ..., - casting: _CastingKind = ..., - order: _OrderKACF = ..., - dtype: DTypeLike = ..., - subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., + /, + *, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], ) -> NDArray[Any]: ... + @overload # (array-like, array-like, out=array) -> array + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, + *, + out: NDArray[np.generic] | tuple[NDArray[np.generic]], + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... + @overload # (array-like, array-like) -> array | scalar + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, + *, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any] | Any: ... 
@type_check_only class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @property + def __qualname__(self) -> _NameType: ... + @property def ntypes(self) -> _NTypes: ... @property def identity(self) -> _IDType: ... @@ -326,6 +394,8 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @property def __name__(self) -> _NameType: ... @property + def __qualname__(self) -> _NameType: ... + @property def ntypes(self) -> _NTypes: ... @property def identity(self) -> _IDType: ... @@ -381,6 +451,8 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] @property def __name__(self) -> _NameType: ... @property + def __qualname__(self) -> _NameType: ... + @property def ntypes(self) -> _NTypes: ... @property def identity(self) -> _IDType: ... diff --git a/numpy/_utils/__init__.py b/numpy/_utils/__init__.py index 9794c4e0c4a1..ca3aacd84d5b 100644 --- a/numpy/_utils/__init__.py +++ b/numpy/_utils/__init__.py @@ -66,6 +66,7 @@ def _rename_parameter(old_names, new_names, dep_version=None): def decorator(fun): @functools.wraps(fun) def wrapper(*args, **kwargs): + __tracebackhide__ = True # Hide traceback for py.test for old_name, new_name in zip(old_names, new_names): if old_name in kwargs: if dep_version: diff --git a/numpy/_utils/__init__.pyi b/numpy/_utils/__init__.pyi new file mode 100644 index 000000000000..ced45bfdeb44 --- /dev/null +++ b/numpy/_utils/__init__.pyi @@ -0,0 +1,31 @@ +from collections.abc import Callable, Iterable +from typing import Protocol, overload, type_check_only + +from _typeshed import IdentityFunction +from typing_extensions import TypeVar + +from ._convertions import asbytes as asbytes +from ._convertions import asunicode as asunicode + +### + +_T = TypeVar("_T") +_HasModuleT = TypeVar("_HasModuleT", bound=_HasModule) + +@type_check_only +class _HasModule(Protocol): + __module__: str + +### + +@overload +def 
set_module(module: None) -> IdentityFunction: ... +@overload +def set_module(module: _HasModuleT) -> _HasModuleT: ... + +# +def _rename_parameter( + old_names: Iterable[str], + new_names: Iterable[str], + dep_version: str | None = None, +) -> Callable[[Callable[..., _T]], Callable[..., _T]]: ... diff --git a/numpy/_utils/_convertions.pyi b/numpy/_utils/_convertions.pyi new file mode 100644 index 000000000000..6cc599acc94f --- /dev/null +++ b/numpy/_utils/_convertions.pyi @@ -0,0 +1,4 @@ +__all__ = ["asbytes", "asunicode"] + +def asunicode(s: bytes | str) -> str: ... +def asbytes(s: bytes | str) -> str: ... diff --git a/numpy/_utils/_inspect.pyi b/numpy/_utils/_inspect.pyi new file mode 100644 index 000000000000..ba0260d3a593 --- /dev/null +++ b/numpy/_utils/_inspect.pyi @@ -0,0 +1,71 @@ +import types +from collections.abc import Callable, Mapping +from typing import Any, Final, TypeAlias, overload + +from _typeshed import SupportsLenAndGetItem +from typing_extensions import TypeIs, TypeVar + +__all__ = ["formatargspec", "getargspec"] + +### + +_T = TypeVar("_T") +_RT = TypeVar("_RT") + +_StrSeq: TypeAlias = SupportsLenAndGetItem[str] +_NestedSeq: TypeAlias = list[_T | _NestedSeq[_T]] | tuple[_T | _NestedSeq[_T], ...] + +_JoinFunc: TypeAlias = Callable[[list[_T]], _T] +_FormatFunc: TypeAlias = Callable[[_T], str] + +### + +CO_OPTIMIZED: Final = 1 +CO_NEWLOCALS: Final = 2 +CO_VARARGS: Final = 4 +CO_VARKEYWORDS: Final = 8 + +### + +def ismethod(object: object) -> TypeIs[types.MethodType]: ... +def isfunction(object: object) -> TypeIs[types.FunctionType]: ... +def iscode(object: object) -> TypeIs[types.CodeType]: ... + +### + +def getargs(co: types.CodeType) -> tuple[list[str], str | None, str | None]: ... +def getargspec(func: types.MethodType | types.FunctionType) -> tuple[list[str], str | None, str | None, tuple[Any, ...]]: ... +def getargvalues(frame: types.FrameType) -> tuple[list[str], str | None, str | None, dict[str, Any]]: ... 
+ +# +def joinseq(seq: _StrSeq) -> str: ... + +# +@overload +def strseq(object: _NestedSeq[str], convert: Callable[[Any], Any], join: _JoinFunc[str] = ...) -> str: ... +@overload +def strseq(object: _NestedSeq[_T], convert: Callable[[_T], _RT], join: _JoinFunc[_RT]) -> _RT: ... + +# +def formatargspec( + args: _StrSeq, + varargs: str | None = None, + varkw: str | None = None, + defaults: SupportsLenAndGetItem[object] | None = None, + formatarg: _FormatFunc[str] = ..., # str + formatvarargs: _FormatFunc[str] = ..., # "*{}".format + formatvarkw: _FormatFunc[str] = ..., # "**{}".format + formatvalue: _FormatFunc[object] = ..., # "={!r}".format + join: _JoinFunc[str] = ..., # joinseq +) -> str: ... +def formatargvalues( + args: _StrSeq, + varargs: str | None, + varkw: str | None, + locals: Mapping[str, object] | None, + formatarg: _FormatFunc[str] = ..., # str + formatvarargs: _FormatFunc[str] = ..., # "*{}".format + formatvarkw: _FormatFunc[str] = ..., # "**{}".format + formatvalue: _FormatFunc[object] = ..., # "={!r}".format + join: _JoinFunc[str] = ..., # joinseq +) -> str: ... diff --git a/numpy/_utils/_pep440.pyi b/numpy/_utils/_pep440.pyi new file mode 100644 index 000000000000..29dd4c912aa9 --- /dev/null +++ b/numpy/_utils/_pep440.pyi @@ -0,0 +1,121 @@ +import re +from collections.abc import Callable +from typing import ( + Any, + ClassVar, + Final, + Generic, + NamedTuple, + TypeVar, + final, + type_check_only, +) +from typing import ( + Literal as L, +) + +from typing_extensions import TypeIs + +__all__ = ["VERSION_PATTERN", "InvalidVersion", "LegacyVersion", "Version", "parse"] + +### + +_CmpKeyT = TypeVar("_CmpKeyT", bound=tuple[object, ...]) +_CmpKeyT_co = TypeVar("_CmpKeyT_co", bound=tuple[object, ...], default=tuple[Any, ...], covariant=True) + +### + +VERSION_PATTERN: Final[str] = ... + +class InvalidVersion(ValueError): ... + +@type_check_only +@final +class _InfinityType: + def __hash__(self) -> int: ... 
+ def __eq__(self, other: object, /) -> TypeIs[_InfinityType]: ... + def __ne__(self, other: object, /) -> bool: ... + def __lt__(self, other: object, /) -> L[False]: ... + def __le__(self, other: object, /) -> L[False]: ... + def __gt__(self, other: object, /) -> L[True]: ... + def __ge__(self, other: object, /) -> L[True]: ... + def __neg__(self) -> _NegativeInfinityType: ... + +Infinity: Final[_InfinityType] = ... + +@type_check_only +@final +class _NegativeInfinityType: + def __hash__(self) -> int: ... + def __eq__(self, other: object, /) -> TypeIs[_NegativeInfinityType]: ... + def __ne__(self, other: object, /) -> bool: ... + def __lt__(self, other: object, /) -> L[True]: ... + def __le__(self, other: object, /) -> L[True]: ... + def __gt__(self, other: object, /) -> L[False]: ... + def __ge__(self, other: object, /) -> L[False]: ... + def __neg__(self) -> _InfinityType: ... + +NegativeInfinity: Final[_NegativeInfinityType] = ... + +class _Version(NamedTuple): + epoch: int + release: tuple[int, ...] + dev: tuple[str, int] | None + pre: tuple[str, int] | None + post: tuple[str, int] | None + local: tuple[str | int, ...] | None + +class _BaseVersion(Generic[_CmpKeyT_co]): + _key: _CmpKeyT_co + def __hash__(self) -> int: ... + def __eq__(self, other: _BaseVersion, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __ne__(self, other: _BaseVersion, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __lt__(self, other: _BaseVersion, /) -> bool: ... + def __le__(self, other: _BaseVersion, /) -> bool: ... + def __ge__(self, other: _BaseVersion, /) -> bool: ... + def __gt__(self, other: _BaseVersion, /) -> bool: ... + def _compare(self, /, other: _BaseVersion[_CmpKeyT], method: Callable[[_CmpKeyT_co, _CmpKeyT], bool]) -> bool: ... + +class LegacyVersion(_BaseVersion[tuple[L[-1], tuple[str, ...]]]): + _version: Final[str] + def __init__(self, /, version: str) -> None: ... 
+ @property + def public(self) -> str: ... + @property + def base_version(self) -> str: ... + @property + def local(self) -> None: ... + @property + def is_prerelease(self) -> L[False]: ... + @property + def is_postrelease(self) -> L[False]: ... + +class Version( + _BaseVersion[ + tuple[ + int, # epoch + tuple[int, ...], # release + tuple[str, int] | _InfinityType | _NegativeInfinityType, # pre + tuple[str, int] | _NegativeInfinityType, # post + tuple[str, int] | _InfinityType, # dev + tuple[tuple[int, L[""]] | tuple[_NegativeInfinityType, str], ...] | _NegativeInfinityType, # local + ], + ], +): + _regex: ClassVar[re.Pattern[str]] = ... + _version: Final[str] + + def __init__(self, /, version: str) -> None: ... + @property + def public(self) -> str: ... + @property + def base_version(self) -> str: ... + @property + def local(self) -> str | None: ... + @property + def is_prerelease(self) -> bool: ... + @property + def is_postrelease(self) -> bool: ... + +# +def parse(version: str) -> Version | LegacyVersion: ... diff --git a/numpy/conftest.py b/numpy/conftest.py index b37092296005..0eb42d1103e4 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -2,6 +2,7 @@ Pytest configuration and fixtures for the Numpy test suite. 
""" import os +import string import sys import tempfile from contextlib import contextmanager @@ -10,9 +11,11 @@ import hypothesis import pytest import numpy +import numpy as np from numpy._core._multiarray_tests import get_fpu_mode -from numpy.testing._private.utils import NOGIL_BUILD +from numpy._core.tests._natype import pd_NA +from numpy.testing._private.utils import NOGIL_BUILD, get_stringdtype_dtype try: from scipy_doctest.conftest import dt_config @@ -204,12 +207,12 @@ def warnings_errors_and_rng(test=None): dt_config.check_namespace['StringDType'] = numpy.dtypes.StringDType # temporary skips - dt_config.skiplist = set([ + dt_config.skiplist = { 'numpy.savez', # unclosed file 'numpy.matlib.savez', 'numpy.__array_namespace_info__', 'numpy.matlib.__array_namespace_info__', - ]) + } # xfail problematic tutorials dt_config.pytest_extra_xfail = { @@ -231,3 +234,28 @@ def warnings_errors_and_rng(test=None): 'numpy/f2py/_backends/_distutils.py', ] + +@pytest.fixture +def random_string_list(): + chars = list(string.ascii_letters + string.digits) + chars = np.array(chars, dtype="U1") + ret = np.random.choice(chars, size=100 * 10, replace=True) + return ret.view("U100") + + +@pytest.fixture(params=[True, False]) +def coerce(request): + return request.param + + +@pytest.fixture( + params=["unset", None, pd_NA, np.nan, float("nan"), "__nan__"], + ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"], +) +def na_object(request): + return request.param + + +@pytest.fixture() +def dtype(na_object, coerce): + return get_stringdtype_dtype(na_object, coerce) diff --git a/numpy/core/_dtype.pyi b/numpy/core/_dtype.pyi new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/numpy/core/_dtype_ctypes.pyi b/numpy/core/_dtype_ctypes.pyi new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/numpy/core/overrides.pyi b/numpy/core/overrides.pyi new file mode 100644 index 000000000000..fab3512626f8 --- /dev/null +++ b/numpy/core/overrides.pyi 
@@ -0,0 +1,7 @@ +# NOTE: At runtime, this submodule dynamically re-exports any `numpy._core.overrides` +# member, and issues a `DeprecationWarning` when accessed. But since there is no +# `__dir__` or `__all__` present, these annotations would be unverifiable. Because +# this module is also deprecated in favor of `numpy._core`, and therefore not part of +# the public API, we omit the "re-exports", which in practice would require literal +# duplication of the stubs in order for the `@deprecated` decorator to be understood +# by type-checkers. diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index 5cb345035f2c..11e5611653fa 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -1,18 +1,13 @@ -from typing import ( - Any, - Final, - Generic, - Literal as L, - NoReturn, - TypeAlias, - final, - type_check_only, -) +# ruff: noqa: ANN401 +from types import MemberDescriptorType +from typing import Any, ClassVar, Generic, NoReturn, TypeAlias, final, type_check_only +from typing import Literal as L + from typing_extensions import LiteralString, Self, TypeVar import numpy as np -__all__ = [ +__all__ = [ # noqa: RUF022 'BoolDType', 'Int8DType', 'ByteDType', @@ -53,7 +48,7 @@ __all__ = [ _SCT_co = TypeVar("_SCT_co", bound=np.generic, covariant=True) @type_check_only -class _SimpleDType(Generic[_SCT_co], np.dtype[_SCT_co]): # type: ignore[misc] +class _SimpleDType(np.dtype[_SCT_co], Generic[_SCT_co]): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] names: None # pyright: ignore[reportIncompatibleVariableOverride] def __new__(cls, /) -> Self: ... def __getitem__(self, key: Any, /) -> NoReturn: ... @@ -73,7 +68,7 @@ class _SimpleDType(Generic[_SCT_co], np.dtype[_SCT_co]): # type: ignore[misc] def subdtype(self) -> None: ... @type_check_only -class _LiteralDType(Generic[_SCT_co], _SimpleDType[_SCT_co]): # type: ignore[misc] +class _LiteralDType(_SimpleDType[_SCT_co], Generic[_SCT_co]): # type: ignore[misc] @property def flags(self) -> L[0]: ... 
@property @@ -234,10 +229,11 @@ class UInt64DType( # type: ignore[misc] def str(self) -> L["u8"]: ... # Standard C-named version/alias: -ByteDType: Final = Int8DType -UByteDType: Final = UInt8DType -ShortDType: Final = Int16DType -UShortDType: Final = UInt16DType +# NOTE: Don't make these `Final`: it will break stubtest +ByteDType = Int8DType +UByteDType = UInt8DType +ShortDType = Int16DType +UShortDType = UInt16DType @final class IntDType( # type: ignore[misc] @@ -419,11 +415,11 @@ class ObjectDType( # type: ignore[misc] @final class BytesDType( # type: ignore[misc] - Generic[_ItemSize_co], _TypeCodes[L["S"], L["S"], L[18]], _NoOrder, _NBit[L[1],_ItemSize_co], _SimpleDType[np.bytes_], + Generic[_ItemSize_co], ): def __new__(cls, size: _ItemSize_co, /) -> BytesDType[_ItemSize_co]: ... @property @@ -435,11 +431,11 @@ class BytesDType( # type: ignore[misc] @final class StrDType( # type: ignore[misc] - Generic[_ItemSize_co], _TypeCodes[L["U"], L["U"], L[19]], _NativeOrder, _NBit[L[4],_ItemSize_co], _SimpleDType[np.str_], + Generic[_ItemSize_co], ): def __new__(cls, size: _ItemSize_co, /) -> StrDType[_ItemSize_co]: ... @property @@ -451,11 +447,11 @@ class StrDType( # type: ignore[misc] @final class VoidDType( # type: ignore[misc] - Generic[_ItemSize_co], _TypeCodes[L["V"], L["V"], L[20]], _NoOrder, _NBit[L[1], _ItemSize_co], - np.dtype[np.void], + np.dtype[np.void], # pyright: ignore[reportGeneralTypeIssues] + Generic[_ItemSize_co], ): # NOTE: `VoidDType(...)` raises a `TypeError` at the moment def __new__(cls, length: _ItemSize_co, /) -> NoReturn: ... @@ -578,8 +574,13 @@ class StringDType( # type: ignore[misc] _NativeOrder, _NBit[L[8], L[16]], # TODO: Replace the (invalid) `str` with the scalar type, once implemented - np.dtype[str], # type: ignore[type-var] + np.dtype[str], # type: ignore[type-var] # pyright: ignore[reportGeneralTypeIssues,reportInvalidTypeArguments] ): + @property + def coerce(self) -> L[True]: ... 
+ na_object: ClassVar[MemberDescriptorType] # does not get instantiated + + # def __new__(cls, /) -> StringDType: ... def __getitem__(self, key: Any, /) -> NoReturn: ... @property diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index 095e2600f317..e926a52d1b51 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -26,7 +26,7 @@ 'hasexternals', 'hasinitvalue', 'hasnote', 'hasresultnote', 'isallocatable', 'isarray', 'isarrayofstrings', 'ischaracter', 'ischaracterarray', 'ischaracter_or_characterarray', - 'iscomplex', + 'iscomplex', 'iscstyledirective', 'iscomplexarray', 'iscomplexfunction', 'iscomplexfunction_warn', 'isdouble', 'isdummyroutine', 'isexternal', 'isfunction', 'isfunction_wrap', 'isint1', 'isint1array', 'isinteger', 'isintent_aux', @@ -423,6 +423,11 @@ def isrequired(var): return not isoptional(var) and isintent_nothide(var) +def iscstyledirective(f2py_line): + directives = {"callstatement", "callprotoargument", "pymethoddef"} + return any(directive in f2py_line.lower() for directive in directives) + + def isintent_in(var): if 'intent' not in var: return 1 diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 6eea03477808..3ea1888df113 100644 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -510,11 +510,9 @@ def readfortrancode(ffile, dowithline=show, istop=1): origfinalline = '' else: if localdolowercase: - # lines with intent() should be lowered otherwise - # TestString::test_char fails due to mixed case - # f2py directives without intent() should be left untouched - # gh-2547, gh-27697, gh-26681 - finalline = ll.lower() if "intent" in ll.lower() or not is_f2py_directive else ll + # only skip lowering for C style constructs + # gh-2547, gh-27697, gh-26681, gh-28014 + finalline = ll.lower() if not (is_f2py_directive and iscstyledirective(ll)) else ll else: finalline = ll origfinalline = ll @@ -1491,26 +1489,9 @@ def analyzeline(m, case, line): line = m.group('after').strip() if not 
line[0] == '/': line = '//' + line + cl = [] - f = 0 - bn = '' - ol = '' - for c in line: - if c == '/': - f = f + 1 - continue - if f >= 3: - bn = bn.strip() - if not bn: - bn = '_BLNK_' - cl.append([bn, ol]) - f = f - 2 - bn = '' - ol = '' - if f % 2: - bn = bn + c - else: - ol = ol + c + [_, bn, ol] = re.split('/', line, maxsplit=2) bn = bn.strip() if not bn: bn = '_BLNK_' diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index bf7b46c89f08..84137811a446 100644 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -245,6 +245,11 @@ if (! PyErr_Occurred()) on_exit(f2py_report_on_exit,(void*)\"#modulename#\"); #endif + + if (PyType_Ready(&PyFortran_Type) < 0) { + return NULL; + } + return m; } #ifdef __cplusplus diff --git a/numpy/f2py/tests/src/crackfortran/common_with_division.f b/numpy/f2py/tests/src/crackfortran/common_with_division.f new file mode 100644 index 000000000000..4aa12cf6dcee --- /dev/null +++ b/numpy/f2py/tests/src/crackfortran/common_with_division.f @@ -0,0 +1,17 @@ + subroutine common_with_division + integer lmu,lb,lub,lpmin + parameter (lmu=1) + parameter (lb=20) +c crackfortran fails to parse this +c parameter (lub=(lb-1)*lmu+1) +c crackfortran can successfully parse this though + parameter (lub=lb*lmu-lmu+1) + parameter (lpmin=2) + +c crackfortran fails to parse this correctly +c common /mortmp/ ctmp((lub*(lub+1)*(lub+1))/lpmin+1) + + common /mortmp/ ctmp(lub/lpmin+1) + + return + end diff --git a/numpy/f2py/tests/src/regression/lower_f2py_fortran.f90 b/numpy/f2py/tests/src/regression/lower_f2py_fortran.f90 new file mode 100644 index 000000000000..1c4b8c192b1b --- /dev/null +++ b/numpy/f2py/tests/src/regression/lower_f2py_fortran.f90 @@ -0,0 +1,5 @@ +subroutine inquire_next(IU) + IMPLICIT NONE + integer :: IU + !f2py intent(in) IU +end subroutine diff --git a/numpy/f2py/tests/test_crackfortran.py b/numpy/f2py/tests/test_crackfortran.py index ed3588c25475..965a6b0f87e8 100644 --- a/numpy/f2py/tests/test_crackfortran.py +++ 
b/numpy/f2py/tests/test_crackfortran.py @@ -114,12 +114,16 @@ def incr(x): class TestCrackFortran(util.F2PyTest): # gh-2848: commented lines between parameters in subroutine parameter lists - sources = [util.getpath("tests", "src", "crackfortran", "gh2848.f90")] + sources = [util.getpath("tests", "src", "crackfortran", "gh2848.f90"), + util.getpath("tests", "src", "crackfortran", "common_with_division.f") + ] def test_gh2848(self): r = self.module.gh2848(1, 2) assert r == (1, 2) + def test_common_with_division(self): + assert len(self.module.mortmp.ctmp) == 11 class TestMarkinnerspaces: # gh-14118: markinnerspaces does not handle multiple quotations diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index 335c8470d2af..c62f82ac3fc0 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -122,6 +122,15 @@ def test_gh26148b(self): assert(res[0] == 8) assert(res[1] == 15) +class TestLowerF2PYDirectives(util.F2PyTest): + # Check variables are cased correctly + sources = [util.getpath("tests", "src", "regression", "lower_f2py_fortran.f90")] + + @pytest.mark.slow + def test_gh28014(self): + self.module.inquire_next(3) + assert True + @pytest.mark.slow def test_gh26623(): # Including libraries with . should not generate an incorrect meson.build diff --git a/numpy/f2py/tests/test_return_real.py b/numpy/f2py/tests/test_return_real.py index d9b316dcc45d..25b638890a96 100644 --- a/numpy/f2py/tests/test_return_real.py +++ b/numpy/f2py/tests/test_return_real.py @@ -1,8 +1,8 @@ import platform import pytest -import numpy as np from numpy import array +from numpy.testing import IS_64BIT from . 
import util @@ -53,8 +53,7 @@ def check_function(self, t, tname): "but not when run in isolation", ) @pytest.mark.skipif( - np.dtype(np.intp).itemsize < 8, - reason="32-bit builds are buggy" + not IS_64BIT, reason="32-bit builds are buggy" ) class TestCReturnReal(TestReturnReal): suffix = ".pyf" diff --git a/numpy/f2py/tests/test_semicolon_split.py b/numpy/f2py/tests/test_semicolon_split.py index ab9c093dbb82..8a9eb8743501 100644 --- a/numpy/f2py/tests/test_semicolon_split.py +++ b/numpy/f2py/tests/test_semicolon_split.py @@ -1,6 +1,7 @@ import platform import pytest -import numpy as np + +from numpy.testing import IS_64BIT from . import util @@ -11,8 +12,7 @@ "but not when run in isolation", ) @pytest.mark.skipif( - np.dtype(np.intp).itemsize < 8, - reason="32-bit builds are buggy" + not IS_64BIT, reason="32-bit builds are buggy" ) class TestMultiline(util.F2PyTest): suffix = ".pyf" @@ -44,8 +44,7 @@ def test_multiline(self): "but not when run in isolation", ) @pytest.mark.skipif( - np.dtype(np.intp).itemsize < 8, - reason="32-bit builds are buggy" + not IS_64BIT, reason="32-bit builds are buggy" ) @pytest.mark.slow class TestCallstatement(util.F2PyTest): diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index 9964c285e2bc..e2fcc1ba39d4 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -57,7 +57,6 @@ def check_language(lang, code_snippet=None): return runmeson.returncode == 0 finally: shutil.rmtree(tmpdir) - return False fortran77_code = ''' diff --git a/numpy/fft/_helper.pyi b/numpy/fft/_helper.pyi index 5cb28db2239e..7673c1800a92 100644 --- a/numpy/fft/_helper.pyi +++ b/numpy/fft/_helper.pyi @@ -1,51 +1,38 @@ -from typing import Any, TypeVar, overload, Literal as L +from typing import Any, Final, TypeVar, overload +from typing import Literal as L -from numpy import generic, integer, floating, complexfloating -from numpy._typing import ( - NDArray, - ArrayLike, - _ShapeLike, - _ArrayLike, - _ArrayLikeFloat_co, - 
_ArrayLikeComplex_co, -) +from numpy import complexfloating, floating, generic, integer +from numpy._typing import ArrayLike, NDArray, _ArrayLike, _ArrayLikeComplex_co, _ArrayLikeFloat_co, _ShapeLike -__all__ = ["fftshift", "ifftshift", "fftfreq", "rfftfreq"] +__all__ = ["fftfreq", "fftshift", "ifftshift", "rfftfreq"] _SCT = TypeVar("_SCT", bound=generic) +### + +integer_types: Final[tuple[type[int], type[integer]]] = ... + +### + @overload -def fftshift(x: _ArrayLike[_SCT], axes: None | _ShapeLike = ...) -> NDArray[_SCT]: ... +def fftshift(x: _ArrayLike[_SCT], axes: _ShapeLike | None = None) -> NDArray[_SCT]: ... @overload -def fftshift(x: ArrayLike, axes: None | _ShapeLike = ...) -> NDArray[Any]: ... +def fftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... +# @overload -def ifftshift(x: _ArrayLike[_SCT], axes: None | _ShapeLike = ...) -> NDArray[_SCT]: ... +def ifftshift(x: _ArrayLike[_SCT], axes: _ShapeLike | None = None) -> NDArray[_SCT]: ... @overload -def ifftshift(x: ArrayLike, axes: None | _ShapeLike = ...) -> NDArray[Any]: ... +def ifftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... +# @overload -def fftfreq( - n: int | integer[Any], - d: _ArrayLikeFloat_co = ..., - device: None | L["cpu"] = ..., -) -> NDArray[floating[Any]]: ... +def fftfreq(n: int | integer, d: _ArrayLikeFloat_co = 1.0, device: L["cpu"] | None = None) -> NDArray[floating]: ... @overload -def fftfreq( - n: int | integer[Any], - d: _ArrayLikeComplex_co = ..., - device: None | L["cpu"] = ..., -) -> NDArray[complexfloating[Any, Any]]: ... +def fftfreq(n: int | integer, d: _ArrayLikeComplex_co = 1.0, device: L["cpu"] | None = None) -> NDArray[complexfloating]: ... +# @overload -def rfftfreq( - n: int | integer[Any], - d: _ArrayLikeFloat_co = ..., - device: None | L["cpu"] = ..., -) -> NDArray[floating[Any]]: ... +def rfftfreq(n: int | integer, d: _ArrayLikeFloat_co = 1.0, device: L["cpu"] | None = None) -> NDArray[floating]: ... 
@overload -def rfftfreq( - n: int | integer[Any], - d: _ArrayLikeComplex_co = ..., - device: None | L["cpu"] = ..., -) -> NDArray[complexfloating[Any, Any]]: ... +def rfftfreq(n: int | integer, d: _ArrayLikeComplex_co = 1.0, device: L["cpu"] | None = None) -> NDArray[complexfloating]: ... diff --git a/numpy/fft/helper.pyi b/numpy/fft/helper.pyi new file mode 100644 index 000000000000..887cbe7e27c9 --- /dev/null +++ b/numpy/fft/helper.pyi @@ -0,0 +1,22 @@ +from typing import Any +from typing import Literal as L + +from typing_extensions import deprecated + +import numpy as np +from numpy._typing import ArrayLike, NDArray, _ShapeLike + +from ._helper import integer_types as integer_types + +__all__ = ["fftfreq", "fftshift", "ifftshift", "rfftfreq"] + +### + +@deprecated("Please use `numpy.fft.fftshift` instead.") +def fftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... +@deprecated("Please use `numpy.fft.ifftshift` instead.") +def ifftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... +@deprecated("Please use `numpy.fft.fftfreq` instead.") +def fftfreq(n: int | np.integer, d: ArrayLike = 1.0, device: L["cpu"] | None = None) -> NDArray[Any]: ... +@deprecated("Please use `numpy.fft.rfftfreq` instead.") +def rfftfreq(n: int | np.integer, d: ArrayLike = 1.0, device: L["cpu"] | None = None) -> NDArray[Any]: ... 
diff --git a/numpy/fft/meson.build b/numpy/fft/meson.build index 751b5dc74d30..e18949af5e31 100644 --- a/numpy/fft/meson.build +++ b/numpy/fft/meson.build @@ -24,6 +24,7 @@ py.install_sources( '_helper.py', '_helper.pyi', 'helper.py', + 'helper.pyi', ], subdir: 'numpy/fft' ) diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index 3261cdac8cf6..20f2d576bf00 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -10,35 +10,7 @@ from typing import ( from typing_extensions import deprecated import numpy as np -from numpy import ( - generic, - number, - ushort, - ubyte, - uintc, - uint, - ulonglong, - short, - int8, - byte, - intc, - int_, - intp, - longlong, - half, - single, - double, - longdouble, - csingle, - cdouble, - clongdouble, - timedelta64, - datetime64, - object_, - str_, - bytes_, - void, -) +from numpy import generic, number, int8, intp, timedelta64, object_ from numpy._typing import ( ArrayLike, @@ -75,33 +47,17 @@ _NumberType = TypeVar("_NumberType", bound=number[Any]) # Only relevant if two or more arguments are parametrized, (e.g. 
`setdiff1d`) # which could result in, for example, `int64` and `float64`producing a # `number[_64Bit]` array -_SCTNoCast = TypeVar( - "_SCTNoCast", +_EitherSCT = TypeVar( + "_EitherSCT", np.bool, - ushort, - ubyte, - uintc, - uint, - ulonglong, - short, - byte, - intc, - int_, - longlong, - half, - single, - double, - longdouble, - csingle, - cdouble, - clongdouble, - timedelta64, - datetime64, - object_, - str_, - bytes_, - void, -) + np.int8, np.int16, np.int32, np.int64, np.intp, + np.uint8, np.uint16, np.uint32, np.uint64, np.uintp, + np.float16, np.float32, np.float64, np.longdouble, + np.complex64, np.complex128, np.clongdouble, + np.timedelta64, np.datetime64, + np.bytes_, np.str_, np.void, np.object_, + np.integer, np.floating, np.complexfloating, np.character, +) # fmt: skip class UniqueAllResult(NamedTuple, Generic[_SCT]): values: NDArray[_SCT] @@ -339,11 +295,11 @@ def unique_values(x: ArrayLike, /) -> NDArray[Any]: ... @overload def intersect1d( - ar1: _ArrayLike[_SCTNoCast], - ar2: _ArrayLike[_SCTNoCast], + ar1: _ArrayLike[_EitherSCT], + ar2: _ArrayLike[_EitherSCT], assume_unique: bool = ..., return_indices: L[False] = ..., -) -> NDArray[_SCTNoCast]: ... +) -> NDArray[_EitherSCT]: ... @overload def intersect1d( ar1: ArrayLike, @@ -353,11 +309,11 @@ def intersect1d( ) -> NDArray[Any]: ... @overload def intersect1d( - ar1: _ArrayLike[_SCTNoCast], - ar2: _ArrayLike[_SCTNoCast], + ar1: _ArrayLike[_EitherSCT], + ar2: _ArrayLike[_EitherSCT], assume_unique: bool = ..., return_indices: L[True] = ..., -) -> tuple[NDArray[_SCTNoCast], NDArray[intp], NDArray[intp]]: ... +) -> tuple[NDArray[_EitherSCT], NDArray[intp], NDArray[intp]]: ... @overload def intersect1d( ar1: ArrayLike, @@ -368,10 +324,10 @@ def intersect1d( @overload def setxor1d( - ar1: _ArrayLike[_SCTNoCast], - ar2: _ArrayLike[_SCTNoCast], + ar1: _ArrayLike[_EitherSCT], + ar2: _ArrayLike[_EitherSCT], assume_unique: bool = ..., -) -> NDArray[_SCTNoCast]: ... +) -> NDArray[_EitherSCT]: ... 
@overload def setxor1d( ar1: ArrayLike, @@ -400,9 +356,9 @@ def in1d( @overload def union1d( - ar1: _ArrayLike[_SCTNoCast], - ar2: _ArrayLike[_SCTNoCast], -) -> NDArray[_SCTNoCast]: ... + ar1: _ArrayLike[_EitherSCT], + ar2: _ArrayLike[_EitherSCT], +) -> NDArray[_EitherSCT]: ... @overload def union1d( ar1: ArrayLike, @@ -411,10 +367,10 @@ def union1d( @overload def setdiff1d( - ar1: _ArrayLike[_SCTNoCast], - ar2: _ArrayLike[_SCTNoCast], + ar1: _ArrayLike[_EitherSCT], + ar2: _ArrayLike[_EitherSCT], assume_unique: bool = ..., -) -> NDArray[_SCTNoCast]: ... +) -> NDArray[_EitherSCT]: ... @overload def setdiff1d( ar1: ArrayLike, diff --git a/numpy/lib/_arrayterator_impl.pyi b/numpy/lib/_arrayterator_impl.pyi index 58875b3c9301..c24fe56ac8a9 100644 --- a/numpy/lib/_arrayterator_impl.pyi +++ b/numpy/lib/_arrayterator_impl.pyi @@ -1,51 +1,46 @@ +# pyright: reportIncompatibleMethodOverride=false + from collections.abc import Generator from types import EllipsisType -from typing import ( - Any, - TypeAlias, - TypeVar, - overload, -) +from typing import Any, Final, TypeAlias, overload + +from typing_extensions import TypeVar -from numpy import ndarray, dtype, generic -from numpy._typing import DTypeLike, NDArray, _Shape as _AnyShape +import numpy as np __all__ = ["Arrayterator"] -# TODO: Rename to ``_ShapeType`` -_Shape = TypeVar("_Shape", bound=_AnyShape) -_DType = TypeVar("_DType", bound=dtype[Any]) -_ScalarType = TypeVar("_ScalarType", bound=generic) +_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], covariant=True) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype[Any]) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype[Any], covariant=True) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_Index: TypeAlias = ( - EllipsisType - | int - | slice - | tuple[EllipsisType | int | slice, ...] -) +_AnyIndex: TypeAlias = EllipsisType | int | slice | tuple[EllipsisType | int | slice, ...] 
# NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`, # but its ``__getattr__` method does wrap around the former and thus has # access to all its methods -class Arrayterator(ndarray[_Shape, _DType]): - var: ndarray[_Shape, _DType] # type: ignore[assignment] - buf_size: None | int - start: list[int] - stop: list[int] - step: list[int] +class Arrayterator(np.ndarray[_ShapeT_co, _DTypeT_co]): + var: np.ndarray[_ShapeT_co, _DTypeT_co] # type: ignore[assignment] + buf_size: Final[int | None] + start: Final[list[int]] + stop: Final[list[int]] + step: Final[list[int]] @property # type: ignore[misc] - def shape(self) -> tuple[int, ...]: ... + def shape(self) -> _ShapeT_co: ... @property - def flat(self: NDArray[_ScalarType]) -> Generator[_ScalarType, None, None]: ... - def __init__( - self, var: ndarray[_Shape, _DType], buf_size: None | int = ... - ) -> None: ... - @overload - def __array__(self, dtype: None = ..., copy: None | bool = ...) -> ndarray[_AnyShape, _DType]: ... + def flat(self: Arrayterator[Any, np.dtype[_ScalarT]]) -> Generator[_ScalarT]: ... # type: ignore[override] + + # + def __init__(self, /, var: np.ndarray[_ShapeT_co, _DTypeT_co], buf_size: int | None = None) -> None: ... + def __getitem__(self, index: _AnyIndex, /) -> Arrayterator[tuple[int, ...], _DTypeT_co]: ... # type: ignore[override] + def __iter__(self) -> Generator[np.ndarray[tuple[int, ...], _DTypeT_co]]: ... + + # + @overload # type: ignore[override] + def __array__(self, /, dtype: None = None, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __array__(self, dtype: DTypeLike, copy: None | bool = ...) -> NDArray[Any]: ... - def __getitem__(self, index: _Index) -> Arrayterator[_AnyShape, _DType]: ... - def __iter__(self) -> Generator[ndarray[_AnyShape, _DType], None, None]: ... + def __array__(self, /, dtype: _DTypeT, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT]: ... 
diff --git a/numpy/lib/_datasource.pyi b/numpy/lib/_datasource.pyi new file mode 100644 index 000000000000..9f91fdf893a0 --- /dev/null +++ b/numpy/lib/_datasource.pyi @@ -0,0 +1,31 @@ +from pathlib import Path +from typing import IO, Any, TypeAlias + +from _typeshed import OpenBinaryMode, OpenTextMode + +_Mode: TypeAlias = OpenBinaryMode | OpenTextMode + +### + +# exported in numpy.lib.nppyio +class DataSource: + def __init__(self, /, destpath: Path | str | None = ...) -> None: ... + def __del__(self, /) -> None: ... + def abspath(self, /, path: str) -> str: ... + def exists(self, /, path: str) -> bool: ... + + # Whether the file-object is opened in string or bytes mode (by default) + # depends on the file-extension of `path` + def open(self, /, path: str, mode: _Mode = "r", encoding: str | None = None, newline: str | None = None) -> IO[Any]: ... + +class Repository(DataSource): + def __init__(self, /, baseurl: str, destpath: str | None = ...) -> None: ... + def listdir(self, /) -> list[str]: ... + +def open( + path: str, + mode: _Mode = "r", + destpath: str | None = ..., + encoding: str | None = None, + newline: str | None = None, +) -> IO[Any]: ... 
diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index a55a4c3f6b81..e98dcbb7e741 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -1,53 +1,60 @@ -from collections.abc import Sequence, Iterator, Callable, Iterable +# ruff: noqa: ANN401 +from collections.abc import Callable, Iterable, Sequence from typing import ( - Concatenate, - Literal as L, Any, + Concatenate, ParamSpec, - TypeAlias, - TypeVar, - overload, Protocol, SupportsIndex, SupportsInt, - TypeGuard, - type_check_only + TypeAlias, + TypeVar, + overload, + type_check_only, ) -from typing_extensions import deprecated +from typing import Literal as L +from _typeshed import Incomplete +from typing_extensions import TypeIs, deprecated + +import numpy as np from numpy import ( - vectorize as vectorize, + _OrderKACF, + bool_, + complex128, + complexfloating, + datetime64, + float64, + floating, generic, integer, - floating, - complexfloating, intp, - float64, - complex128, - timedelta64, - datetime64, object_, - bool_, - _OrderKACF, + timedelta64, + vectorize, ) from numpy._core.multiarray import bincount +from numpy._globals import _NoValueType from numpy._typing import ( - NDArray, ArrayLike, DTypeLike, - _ShapeLike, - _ScalarLike_co, - _DTypeLike, + NDArray, _ArrayLike, _ArrayLikeBool_co, - _ArrayLikeInt_co, - _ArrayLikeFloat_co, _ArrayLikeComplex_co, - _ArrayLikeTD64_co, _ArrayLikeDT64_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeNumber_co, _ArrayLikeObject_co, - _FloatLike_co, + _ArrayLikeTD64_co, _ComplexLike_co, + _DTypeLike, + _FloatLike_co, + _NestedSequence, + _NumberLike_co, + _ScalarLike_co, + _ShapeLike, ) __all__ = [ @@ -102,12 +109,14 @@ _2Tuple: TypeAlias = tuple[_T, _T] @type_check_only class _TrimZerosSequence(Protocol[_T_co]): - def __len__(self) -> int: ... + def __len__(self, /) -> int: ... @overload def __getitem__(self, key: int, /) -> object: ... 
@overload def __getitem__(self, key: slice, /) -> _T_co: ... +### + @overload def rot90( m: _ArrayLike[_SCT], @@ -130,72 +139,62 @@ def flip(m: _ArrayLike[_SCT], axis: None | _ShapeLike = ...) -> NDArray[_SCT]: . @overload def flip(m: ArrayLike, axis: None | _ShapeLike = ...) -> NDArray[Any]: ... -def iterable(y: object) -> TypeGuard[Iterable[Any]]: ... +def iterable(y: object) -> TypeIs[Iterable[Any]]: ... @overload def average( a: _ArrayLikeFloat_co, - axis: None = ..., - weights: None | _ArrayLikeFloat_co= ..., - returned: L[False] = ..., - keepdims: L[False] = ..., -) -> floating[Any]: ... -@overload -def average( - a: _ArrayLikeComplex_co, - axis: None = ..., - weights: None | _ArrayLikeComplex_co = ..., - returned: L[False] = ..., - keepdims: L[False] = ..., -) -> complexfloating[Any, Any]: ... -@overload -def average( - a: _ArrayLikeObject_co, - axis: None = ..., - weights: None | Any = ..., - returned: L[False] = ..., - keepdims: L[False] = ..., -) -> Any: ... + axis: None = None, + weights: _ArrayLikeFloat_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> floating: ... @overload def average( a: _ArrayLikeFloat_co, - axis: None = ..., - weights: None | _ArrayLikeFloat_co= ..., - returned: L[True] = ..., - keepdims: L[False] = ..., -) -> _2Tuple[floating[Any]]: ... + axis: None = None, + weights: _ArrayLikeFloat_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _2Tuple[floating]: ... @overload def average( a: _ArrayLikeComplex_co, - axis: None = ..., - weights: None | _ArrayLikeComplex_co = ..., - returned: L[True] = ..., - keepdims: L[False] = ..., -) -> _2Tuple[complexfloating[Any, Any]]: ... + axis: None = None, + weights: _ArrayLikeComplex_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> complexfloating: ... 
@overload def average( - a: _ArrayLikeObject_co, - axis: None = ..., - weights: None | Any = ..., - returned: L[True] = ..., - keepdims: L[False] = ..., -) -> _2Tuple[Any]: ... + a: _ArrayLikeComplex_co, + axis: None = None, + weights: _ArrayLikeComplex_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _2Tuple[complexfloating]: ... @overload def average( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - weights: None | Any = ..., - returned: L[False] = ..., - keepdims: bool = ..., -) -> Any: ... + axis: _ShapeLike | None = None, + weights: object | None = None, + *, + returned: L[True], + keepdims: bool | bool_ | _NoValueType = ..., +) -> _2Tuple[Incomplete]: ... @overload def average( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - weights: None | Any = ..., - returned: L[True] = ..., - keepdims: bool = ..., -) -> _2Tuple[Any]: ... + axis: _ShapeLike | None = None, + weights: object | None = None, + returned: bool | bool_ = False, + *, + keepdims: bool | bool_ | _NoValueType = ..., +) -> Incomplete: ... @overload def asarray_chkfinite( @@ -303,24 +302,87 @@ def diff( append: ArrayLike = ..., ) -> NDArray[Any]: ... -@overload +@overload # float scalar def interp( - x: _ArrayLikeFloat_co, + x: _FloatLike_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> float64: ... +@overload # float array +def interp( + x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co], xp: _ArrayLikeFloat_co, fp: _ArrayLikeFloat_co, - left: None | _FloatLike_co = ..., - right: None | _FloatLike_co = ..., - period: None | _FloatLike_co = ..., + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, ) -> NDArray[float64]: ... 
-@overload +@overload # float scalar or array def interp( x: _ArrayLikeFloat_co, xp: _ArrayLikeFloat_co, - fp: _ArrayLikeComplex_co, - left: None | _ComplexLike_co = ..., - right: None | _ComplexLike_co = ..., - period: None | _FloatLike_co = ..., + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[float64] | float64: ... +@overload # complex scalar +def interp( + x: _FloatLike_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLike[complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> complex128: ... +@overload # complex or float scalar +def interp( + x: _FloatLike_co, + xp: _ArrayLikeFloat_co, + fp: Sequence[complex | complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> complex128 | float64: ... +@overload # complex array +def interp( + x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co], + xp: _ArrayLikeFloat_co, + fp: _ArrayLike[complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, ) -> NDArray[complex128]: ... +@overload # complex or float array +def interp( + x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co], + xp: _ArrayLikeFloat_co, + fp: Sequence[complex | complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[complex128 | float64]: ... +@overload # complex scalar or array +def interp( + x: _ArrayLikeFloat_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLike[complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[complex128] | complex128: ... 
+@overload # complex or float scalar or array +def interp( + x: _ArrayLikeFloat_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeNumber_co, + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[complex128 | float64] | complex128 | float64: ... @overload def angle(z: _ComplexLike_co, deg: bool = ...) -> floating[Any]: ... @@ -411,38 +473,46 @@ def cov( dtype: DTypeLike, ) -> NDArray[Any]: ... -# NOTE `bias` and `ddof` have been deprecated +# NOTE `bias` and `ddof` are deprecated and ignored @overload def corrcoef( m: _ArrayLikeFloat_co, - y: None | _ArrayLikeFloat_co = ..., - rowvar: bool = ..., + y: _ArrayLikeFloat_co | None = None, + rowvar: bool = True, + bias: _NoValueType = ..., + ddof: _NoValueType = ..., *, - dtype: None = ..., -) -> NDArray[floating[Any]]: ... + dtype: None = None, +) -> NDArray[floating]: ... @overload def corrcoef( m: _ArrayLikeComplex_co, - y: None | _ArrayLikeComplex_co = ..., - rowvar: bool = ..., + y: _ArrayLikeComplex_co | None = None, + rowvar: bool = True, + bias: _NoValueType = ..., + ddof: _NoValueType = ..., *, - dtype: None = ..., -) -> NDArray[complexfloating[Any, Any]]: ... + dtype: None = None, +) -> NDArray[complexfloating]: ... @overload def corrcoef( m: _ArrayLikeComplex_co, - y: None | _ArrayLikeComplex_co = ..., - rowvar: bool = ..., + y: _ArrayLikeComplex_co | None = None, + rowvar: bool = True, + bias: _NoValueType = ..., + ddof: _NoValueType = ..., *, dtype: _DTypeLike[_SCT], ) -> NDArray[_SCT]: ... @overload def corrcoef( m: _ArrayLikeComplex_co, - y: None | _ArrayLikeComplex_co = ..., - rowvar: bool = ..., + y: _ArrayLikeComplex_co | None = None, + rowvar: bool = True, + bias: _NoValueType = ..., + ddof: _NoValueType = ..., *, - dtype: DTypeLike, + dtype: DTypeLike | None = None, ) -> NDArray[Any]: ... def blackman(M: _FloatLike_co) -> NDArray[floating[Any]]: ... 
@@ -514,7 +584,6 @@ def median( a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, axis: None | _ShapeLike, out: _ArrayType, - /, overwrite_input: bool = ..., keepdims: bool = ..., ) -> _ArrayType: ... @@ -682,7 +751,6 @@ def percentile( q: _ArrayLikeFloat_co, axis: None | _ShapeLike, out: _ArrayType, - /, overwrite_input: bool = ..., method: _MethodKind = ..., keepdims: bool = ..., diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index bd508a8b5905..4a1426fd4d6c 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -1,45 +1,23 @@ from collections.abc import Sequence -from typing import ( - Any, - TypeVar, - Generic, - overload, - Literal, - SupportsIndex, -) +from typing import Any, ClassVar, Final, Generic, SupportsIndex, final, overload +from typing import Literal as L + +from _typeshed import Incomplete +from typing_extensions import Self, TypeVar, deprecated import numpy as np -from numpy import ( - # Circumvent a naming conflict with `AxisConcatenator.matrix` - matrix as _Matrix, - ndenumerate, - ndindex, - ndarray, - dtype, - str_, - bytes_, - int_, - float64, - complex128, -) +from numpy._core.multiarray import ravel_multi_index, unravel_index from numpy._typing import ( - # Arrays ArrayLike, - _NestedSequence, - _FiniteNestedSequence, NDArray, - - # DTypes - DTypeLike, - _SupportsDType, - - # Shapes + _FiniteNestedSequence, + _NestedSequence, _Shape, + _SupportsArray, + _SupportsDType, ) -from numpy._core.multiarray import unravel_index, ravel_multi_index - -__all__ = [ +__all__ = [ # noqa: RUF022 "ravel_multi_index", "unravel_index", "mgrid", @@ -56,114 +34,163 @@ __all__ = [ "diag_indices_from", ] +### + _T = TypeVar("_T") -_DType = TypeVar("_DType", bound=dtype[Any]) -_BoolType = TypeVar("_BoolType", Literal[True], Literal[False]) -_TupType = TypeVar("_TupType", bound=tuple[Any, ...]) -_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) +_TupleT = 
TypeVar("_TupleT", bound=tuple[Any, ...]) +_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype[Any]) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) +_BoolT_co = TypeVar("_BoolT_co", bound=bool, default=bool, covariant=True) -@overload -def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DType]]) -> tuple[ndarray[_Shape, _DType], ...]: ... -@overload -def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[str_], ...]: ... -@overload -def ix_(*args: bytes | _NestedSequence[bytes]) -> tuple[NDArray[bytes_], ...]: ... -@overload -def ix_(*args: bool | _NestedSequence[bool]) -> tuple[NDArray[np.bool], ...]: ... -@overload -def ix_(*args: int | _NestedSequence[int]) -> tuple[NDArray[int_], ...]: ... -@overload -def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[float64], ...]: ... -@overload -def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[complex128], ...]: ... +_AxisT_co = TypeVar("_AxisT_co", bound=int, default=L[0], covariant=True) +_MatrixT_co = TypeVar("_MatrixT_co", bound=bool, default=L[False], covariant=True) +_NDMinT_co = TypeVar("_NDMinT_co", bound=int, default=L[1], covariant=True) +_Trans1DT_co = TypeVar("_Trans1DT_co", bound=int, default=L[-1], covariant=True) + +### -class nd_grid(Generic[_BoolType]): - sparse: _BoolType - def __init__(self, sparse: _BoolType = ...) -> None: ... +class ndenumerate(Generic[_ScalarT_co]): + @overload + def __new__(cls, arr: _FiniteNestedSequence[_SupportsArray[np.dtype[_ScalarT]]]) -> ndenumerate[_ScalarT]: ... + @overload + def __new__(cls, arr: str | _NestedSequence[str]) -> ndenumerate[np.str_]: ... @overload - def __getitem__( - self: nd_grid[Literal[False]], - key: slice | Sequence[slice], - ) -> NDArray[Any]: ... + def __new__(cls, arr: bytes | _NestedSequence[bytes]) -> ndenumerate[np.bytes_]: ... 
@overload - def __getitem__( - self: nd_grid[Literal[True]], - key: slice | Sequence[slice], - ) -> tuple[NDArray[Any], ...]: ... + def __new__(cls, arr: bool | _NestedSequence[bool]) -> ndenumerate[np.bool]: ... + @overload + def __new__(cls, arr: int | _NestedSequence[int]) -> ndenumerate[np.intp]: ... + @overload + def __new__(cls, arr: float | _NestedSequence[float]) -> ndenumerate[np.float64]: ... + @overload + def __new__(cls, arr: complex | _NestedSequence[complex]) -> ndenumerate[np.complex128]: ... + @overload + def __new__(cls, arr: object) -> ndenumerate[Any]: ... -class MGridClass(nd_grid[Literal[False]]): - def __init__(self) -> None: ... + # The first overload is a (semi-)workaround for a mypy bug (tested with v1.10 and v1.11) + @overload + def __next__( + self: ndenumerate[np.bool | np.number | np.flexible | np.datetime64 | np.timedelta64], + /, + ) -> tuple[tuple[int, ...], _ScalarT_co]: ... + @overload + def __next__(self: ndenumerate[np.object_], /) -> tuple[tuple[int, ...], Any]: ... + @overload + def __next__(self, /) -> tuple[tuple[int, ...], _ScalarT_co]: ... + + # + def __iter__(self) -> Self: ... + +class ndindex: + @overload + def __init__(self, shape: tuple[SupportsIndex, ...], /) -> None: ... + @overload + def __init__(self, /, *shape: SupportsIndex) -> None: ... + + # + def __iter__(self) -> Self: ... + def __next__(self) -> tuple[int, ...]: ... -mgrid: MGridClass + # + @deprecated("Deprecated since 1.20.0.") + def ndincr(self, /) -> None: ... -class OGridClass(nd_grid[Literal[True]]): +class nd_grid(Generic[_BoolT_co]): + sparse: _BoolT_co + def __init__(self, sparse: _BoolT_co = ...) -> None: ... + @overload + def __getitem__(self: nd_grid[L[False]], key: slice | Sequence[slice]) -> NDArray[Any]: ... + @overload + def __getitem__(self: nd_grid[L[True]], key: slice | Sequence[slice]) -> tuple[NDArray[Any], ...]: ... + +@final +class MGridClass(nd_grid[L[False]]): + def __init__(self) -> None: ... 
+ +@final +class OGridClass(nd_grid[L[True]]): def __init__(self) -> None: ... -ogrid: OGridClass +class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co]): + __slots__ = "axis", "matrix", "ndmin", "trans1d" + + makemat: ClassVar[type[np.matrix[tuple[int, int], np.dtype[Any]]]] -class AxisConcatenator: - axis: int - matrix: bool - ndmin: int - trans1d: int + axis: _AxisT_co + matrix: _MatrixT_co + ndmin: _NDMinT_co + trans1d: _Trans1DT_co + + # def __init__( self, - axis: int = ..., - matrix: bool = ..., - ndmin: int = ..., - trans1d: int = ..., + /, + axis: _AxisT_co = ..., + matrix: _MatrixT_co = ..., + ndmin: _NDMinT_co = ..., + trans1d: _Trans1DT_co = ..., ) -> None: ... + + # TODO(jorenham): annotate this + def __getitem__(self, key: Incomplete, /) -> Incomplete: ... + def __len__(self, /) -> L[0]: ... + + # @staticmethod @overload - def concatenate( # type: ignore[misc] - *a: ArrayLike, axis: SupportsIndex = ..., out: None = ... - ) -> NDArray[Any]: ... + def concatenate(*a: ArrayLike, axis: SupportsIndex | None = 0, out: _ArrayT) -> _ArrayT: ... @staticmethod @overload - def concatenate( - *a: ArrayLike, axis: SupportsIndex = ..., out: _ArrayType = ... - ) -> _ArrayType: ... - @staticmethod - def makemat( - data: ArrayLike, dtype: DTypeLike = ..., copy: bool = ... - ) -> _Matrix[Any, Any]: ... - - # TODO: Sort out this `__getitem__` method - def __getitem__(self, key: Any) -> Any: ... - -class RClass(AxisConcatenator): - axis: Literal[0] - matrix: Literal[False] - ndmin: Literal[1] - trans1d: Literal[-1] - def __init__(self) -> None: ... + def concatenate(*a: ArrayLike, axis: SupportsIndex | None = 0, out: None = None) -> NDArray[Any]: ... -r_: RClass - -class CClass(AxisConcatenator): - axis: Literal[-1] - matrix: Literal[False] - ndmin: Literal[2] - trans1d: Literal[0] - def __init__(self) -> None: ... +@final +class RClass(AxisConcatenator[L[0], L[False], L[1], L[-1]]): + def __init__(self, /) -> None: ... 
-c_: CClass +@final +class CClass(AxisConcatenator[L[-1], L[False], L[2], L[0]]): + def __init__(self, /) -> None: ... -class IndexExpression(Generic[_BoolType]): - maketuple: _BoolType - def __init__(self, maketuple: _BoolType) -> None: ... +class IndexExpression(Generic[_BoolT_co]): + maketuple: _BoolT_co + def __init__(self, maketuple: _BoolT_co) -> None: ... @overload - def __getitem__(self, item: _TupType) -> _TupType: ... # type: ignore[misc] + def __getitem__(self, item: _TupleT) -> _TupleT: ... @overload - def __getitem__(self: IndexExpression[Literal[True]], item: _T) -> tuple[_T]: ... + def __getitem__(self: IndexExpression[L[True]], item: _T) -> tuple[_T]: ... @overload - def __getitem__(self: IndexExpression[Literal[False]], item: _T) -> _T: ... + def __getitem__(self: IndexExpression[L[False]], item: _T) -> _T: ... + +@overload +def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DTypeT]]) -> tuple[np.ndarray[_Shape, _DTypeT], ...]: ... +@overload +def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[np.str_], ...]: ... +@overload +def ix_(*args: bytes | _NestedSequence[bytes]) -> tuple[NDArray[np.bytes_], ...]: ... +@overload +def ix_(*args: bool | _NestedSequence[bool]) -> tuple[NDArray[np.bool], ...]: ... +@overload +def ix_(*args: int | _NestedSequence[int]) -> tuple[NDArray[np.intp], ...]: ... +@overload +def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[np.float64], ...]: ... +@overload +def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[np.complex128], ...]: ... + +# +def fill_diagonal(a: NDArray[Any], val: object, wrap: bool = ...) -> None: ... + +# +def diag_indices(n: int, ndim: int = ...) -> tuple[NDArray[np.intp], ...]: ... +def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[np.intp], ...]: ... -index_exp: IndexExpression[Literal[True]] -s_: IndexExpression[Literal[False]] +# +mgrid: Final[MGridClass] = ... +ogrid: Final[OGridClass] = ... 
-def fill_diagonal(a: NDArray[Any], val: Any, wrap: bool = ...) -> None: ... -def diag_indices(n: int, ndim: int = ...) -> tuple[NDArray[int_], ...]: ... -def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[int_], ...]: ... +r_: Final[RClass] = ... +c_: Final[CClass] = ... -# NOTE: see `numpy/__init__.pyi` for `ndenumerate` and `ndindex` +index_exp: Final[IndexExpression[L[True]]] = ... +s_: Final[IndexExpression[L[False]]] = ... diff --git a/numpy/lib/_iotools.pyi b/numpy/lib/_iotools.pyi new file mode 100644 index 000000000000..c1591b1a0251 --- /dev/null +++ b/numpy/lib/_iotools.pyi @@ -0,0 +1,106 @@ +from collections.abc import Callable, Iterable, Sequence +from typing import Any, ClassVar, Final, Literal, TypedDict, overload, type_check_only + +from typing_extensions import TypeVar, Unpack + +import numpy as np +import numpy.typing as npt + +_T = TypeVar("_T") + +@type_check_only +class _ValidationKwargs(TypedDict, total=False): + excludelist: Iterable[str] | None + deletechars: Iterable[str] | None + case_sensitive: Literal["upper", "lower"] | bool | None + replace_space: str + +### + +__docformat__: Final[str] = "restructuredtext en" + +class ConverterError(Exception): ... +class ConverterLockError(ConverterError): ... +class ConversionWarning(UserWarning): ... + +class LineSplitter: + delimiter: str | int | Iterable[int] | None + comments: str + encoding: str | None + + def __init__( + self, + /, + delimiter: str | bytes | int | Iterable[int] | None = None, + comments: str | bytes = "#", + autostrip: bool = True, + encoding: str | None = None, + ) -> None: ... + def __call__(self, /, line: str | bytes) -> list[str]: ... + def autostrip(self, /, method: Callable[[_T], Iterable[str]]) -> Callable[[_T], list[str]]: ... 
+ +class NameValidator: + defaultexcludelist: ClassVar[Sequence[str]] + defaultdeletechars: ClassVar[Sequence[str]] + excludelist: list[str] + deletechars: set[str] + case_converter: Callable[[str], str] + replace_space: str + + def __init__( + self, + /, + excludelist: Iterable[str] | None = None, + deletechars: Iterable[str] | None = None, + case_sensitive: Literal["upper", "lower"] | bool | None = None, + replace_space: str = "_", + ) -> None: ... + def __call__(self, /, names: Iterable[str], defaultfmt: str = "f%i", nbfields: int | None = None) -> tuple[str, ...]: ... + def validate(self, /, names: Iterable[str], defaultfmt: str = "f%i", nbfields: int | None = None) -> tuple[str, ...]: ... + +class StringConverter: + func: Callable[[str], Any] | None + default: Any + missing_values: set[str] + type: np.dtype[np.datetime64] | np.generic + + def __init__( + self, + /, + dtype_or_func: npt.DTypeLike | None = None, + default: None = None, + missing_values: Iterable[str] | None = None, + locked: bool = False, + ) -> None: ... + def update( + self, + /, + func: Callable[[str], Any], + default: object | None = None, + testing_value: str | None = None, + missing_values: str = "", + locked: bool = False, + ) -> None: ... + # + def __call__(self, /, value: str) -> Any: ... + def upgrade(self, /, value: str) -> Any: ... + def iterupgrade(self, /, value: Iterable[str] | str) -> None: ... + + # + @classmethod + def upgrade_mapper(cls, func: Callable[[str], Any], default: object | None = None) -> None: ... + +@overload +def str2bool(value: Literal["false", "False", "FALSE"]) -> Literal[False]: ... +@overload +def str2bool(value: Literal["true", "True", "TRUE"]) -> Literal[True]: ... + +# +def has_nested_fields(ndtype: np.dtype[np.void]) -> bool: ... +def flatten_dtype(ndtype: np.dtype[np.void], flatten_base: bool = False) -> type[np.dtype[Any]]: ... 
+def easy_dtype( + ndtype: npt.DTypeLike, + names: Iterable[str] | None = None, + defaultfmt: str = "f%i", + **validationargs: Unpack[_ValidationKwargs], +) -> np.dtype[np.void]: ... diff --git a/numpy/lib/_nanfunctions_impl.py b/numpy/lib/_nanfunctions_impl.py index cc90523f15cd..9d0173dbe340 100644 --- a/numpy/lib/_nanfunctions_impl.py +++ b/numpy/lib/_nanfunctions_impl.py @@ -1761,7 +1761,7 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, The axis for the calculation of the mean should be the same as used in the call to this var function. - .. versionadded:: 1.26.0 + .. versionadded:: 2.0.0 correction : {int, float}, optional Array API compatible name for the ``ddof`` parameter. Only one of them @@ -1958,7 +1958,7 @@ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, The axis for the calculation of the mean should be the same as used in the call to this std function. - .. versionadded:: 1.26.0 + .. versionadded:: 2.0.0 correction : {int, float}, optional Array API compatible name for the ``ddof`` parameter. Only one of them diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index f0d1bb2b0c68..4dc3a4b9b7e2 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -1084,7 +1084,7 @@ def _read(fname, *, delimiter=',', comment='#', quote='"', # be adapted (in principle the concatenate could cast). 
chunks.append(next_arr.astype(read_dtype_via_object_chunks)) - skiprows = 0 # Only have to skip for first chunk + skiplines = 0 # Only have to skip for first chunk if max_rows >= 0: max_rows -= chunk_size if len(next_arr) < chunk_size: diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index 2ab86575601c..16d009524875 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -1,359 +1,285 @@ -import zipfile import types -from _typeshed import StrOrBytesPath, StrPath, SupportsRead, SupportsWrite, SupportsKeysAndGetItem +import zipfile +from collections.abc import Callable, Collection, Iterable, Iterator, Mapping, Sequence from re import Pattern -from collections.abc import Collection, Mapping, Iterator, Sequence, Callable, Iterable -from typing import ( - Literal as L, - Any, - TypeVar, - Generic, - IO, - overload, - Protocol, - type_check_only, -) -from typing_extensions import deprecated +from typing import IO, Any, ClassVar, Generic, Protocol, TypeAlias, overload, type_check_only +from typing import Literal as L -from numpy import ( - recarray, - dtype, - generic, - float64, - void, - record, -) -from numpy.ma.mrecords import MaskedRecords +from _typeshed import StrOrBytesPath, StrPath, SupportsKeysAndGetItem, SupportsRead, SupportsWrite +from typing_extensions import Self, TypeVar, deprecated, override + +import numpy as np from numpy._core.multiarray import packbits, unpackbits -from numpy._typing import ( - ArrayLike, - DTypeLike, - NDArray, - _DTypeLike, - _SupportsArrayFunc, -) +from numpy._typing import ArrayLike, DTypeLike, NDArray, _DTypeLike, _SupportsArrayFunc +from numpy.ma.mrecords import MaskedRecords + +from ._datasource import DataSource as DataSource __all__ = [ - "savetxt", - "loadtxt", + "fromregex", "genfromtxt", "load", + "loadtxt", + "packbits", "save", + "savetxt", "savez", "savez_compressed", - "packbits", "unpackbits", - "fromregex", ] -_T = TypeVar("_T") -_T_contra = TypeVar("_T_contra", contravariant=True) 
_T_co = TypeVar("_T_co", covariant=True) -_SCT = TypeVar("_SCT", bound=generic) +_SCT = TypeVar("_SCT", bound=np.generic) +_SCT_co = TypeVar("_SCT_co", bound=np.generic, default=Any, covariant=True) + +_FName: TypeAlias = StrPath | Iterable[str] | Iterable[bytes] +_FNameRead: TypeAlias = StrPath | SupportsRead[str] | SupportsRead[bytes] +_FNameWriteBytes: TypeAlias = StrPath | SupportsWrite[bytes] +_FNameWrite: TypeAlias = _FNameWriteBytes | SupportsWrite[str] @type_check_only class _SupportsReadSeek(SupportsRead[_T_co], Protocol[_T_co]): def seek(self, offset: int, whence: int, /) -> object: ... class BagObj(Generic[_T_co]): - def __init__(self, obj: SupportsKeysAndGetItem[str, _T_co]) -> None: ... - def __getattribute__(self, key: str) -> _T_co: ... + def __init__(self, /, obj: SupportsKeysAndGetItem[str, _T_co]) -> None: ... + def __getattribute__(self, key: str, /) -> _T_co: ... def __dir__(self) -> list[str]: ... -class NpzFile(Mapping[str, NDArray[Any]]): +class NpzFile(Mapping[str, NDArray[_SCT_co]]): + _MAX_REPR_ARRAY_COUNT: ClassVar[int] = 5 + zip: zipfile.ZipFile - fid: None | IO[str] + fid: IO[str] | None files: list[str] allow_pickle: bool - pickle_kwargs: None | Mapping[str, Any] - _MAX_REPR_ARRAY_COUNT: int - # Represent `f` as a mutable property so we can access the type of `self` - @property - def f(self: _T) -> BagObj[_T]: ... - @f.setter - def f(self: _T, value: BagObj[_T]) -> None: ... + pickle_kwargs: Mapping[str, Any] | None + f: BagObj[NpzFile[_SCT_co]] + + # def __init__( self, - fid: IO[str], - own_fid: bool = ..., - allow_pickle: bool = ..., - pickle_kwargs: None | Mapping[str, Any] = ..., - ) -> None: ... - def __enter__(self: _T) -> _T: ... 
- def __exit__( - self, - exc_type: None | type[BaseException], - exc_value: None | BaseException, - traceback: None | types.TracebackType, /, + fid: IO[Any], + own_fid: bool = False, + allow_pickle: bool = False, + pickle_kwargs: Mapping[str, object] | None = None, + *, + max_header_size: int = 10_000, ) -> None: ... - def close(self) -> None: ... def __del__(self) -> None: ... - def __iter__(self) -> Iterator[str]: ... + def __enter__(self) -> Self: ... + def __exit__(self, cls: type[BaseException] | None, e: BaseException | None, tb: types.TracebackType | None, /) -> None: ... + @override def __len__(self) -> int: ... - def __getitem__(self, key: str) -> NDArray[Any]: ... - def __contains__(self, key: str) -> bool: ... - def __repr__(self) -> str: ... - -class DataSource: - def __init__(self, destpath: StrPath | None = ...) -> None: ... - def __del__(self) -> None: ... - def abspath(self, path: str) -> str: ... - def exists(self, path: str) -> bool: ... - - # Whether the file-object is opened in string or bytes mode (by default) - # depends on the file-extension of `path` - def open( - self, - path: str, - mode: str = ..., - encoding: None | str = ..., - newline: None | str = ..., - ) -> IO[Any]: ... + @override + def __iter__(self) -> Iterator[str]: ... + @override + def __getitem__(self, key: str, /) -> NDArray[_SCT_co]: ... + def close(self) -> None: ... # NOTE: Returns a `NpzFile` if file is a zip file; # returns an `ndarray`/`memmap` otherwise def load( file: StrOrBytesPath | _SupportsReadSeek[bytes], - mmap_mode: L[None, "r+", "r", "w+", "c"] = ..., - allow_pickle: bool = ..., - fix_imports: bool = ..., - encoding: L["ASCII", "latin1", "bytes"] = ..., + mmap_mode: L["r+", "r", "w+", "c"] | None = None, + allow_pickle: bool = False, + fix_imports: bool = True, + encoding: L["ASCII", "latin1", "bytes"] = "ASCII", + *, + max_header_size: int = 10_000, ) -> Any: ... 
@overload -def save( - file: StrPath | SupportsWrite[bytes], - arr: ArrayLike, - allow_pickle: bool = ..., -) -> None: ... +def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool = True) -> None: ... @overload @deprecated("The 'fix_imports' flag is deprecated in NumPy 2.1.") -def save( - file: StrPath | SupportsWrite[bytes], - arr: ArrayLike, - allow_pickle: bool = ..., - *, - fix_imports: bool, -) -> None: ... +def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool, fix_imports: bool) -> None: ... @overload @deprecated("The 'fix_imports' flag is deprecated in NumPy 2.1.") -def save( - file: StrPath | SupportsWrite[bytes], - arr: ArrayLike, - allow_pickle: bool, - fix_imports: bool, -) -> None: ... +def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool = True, *, fix_imports: bool) -> None: ... -def savez( - file: StrPath | SupportsWrite[bytes], - *args: ArrayLike, - allow_pickle: bool = ..., - **kwds: ArrayLike, -) -> None: ... +# +def savez(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ... -def savez_compressed( - file: StrPath | SupportsWrite[bytes], - *args: ArrayLike, - allow_pickle: bool = ..., - **kwds: ArrayLike, -) -> None: ... +# +def savez_compressed(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ... 
# File-like objects only have to implement `__iter__` and, # optionally, `encoding` @overload def loadtxt( - fname: StrPath | Iterable[str] | Iterable[bytes], - dtype: None = ..., - comments: None | str | Sequence[str] = ..., - delimiter: None | str = ..., - converters: None | Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] = ..., - skiprows: int = ..., - usecols: int | Sequence[int] | None = ..., - unpack: bool = ..., - ndmin: L[0, 1, 2] = ..., - encoding: None | str = ..., - max_rows: None | int = ..., + fname: _FName, + dtype: None = None, + comments: str | Sequence[str] | None = "#", + delimiter: str | None = None, + converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, + skiprows: int = 0, + usecols: int | Sequence[int] | None = None, + unpack: bool = False, + ndmin: L[0, 1, 2] = 0, + encoding: str | None = None, + max_rows: int | None = None, *, - quotechar: None | str = ..., - like: None | _SupportsArrayFunc = ... -) -> NDArray[float64]: ... + quotechar: str | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[np.float64]: ... 
@overload def loadtxt( - fname: StrPath | Iterable[str] | Iterable[bytes], + fname: _FName, dtype: _DTypeLike[_SCT], - comments: None | str | Sequence[str] = ..., - delimiter: None | str = ..., - converters: None | Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] = ..., - skiprows: int = ..., - usecols: int | Sequence[int] | None = ..., - unpack: bool = ..., - ndmin: L[0, 1, 2] = ..., - encoding: None | str = ..., - max_rows: None | int = ..., + comments: str | Sequence[str] | None = "#", + delimiter: str | None = None, + converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, + skiprows: int = 0, + usecols: int | Sequence[int] | None = None, + unpack: bool = False, + ndmin: L[0, 1, 2] = 0, + encoding: str | None = None, + max_rows: int | None = None, *, - quotechar: None | str = ..., - like: None | _SupportsArrayFunc = ... + quotechar: str | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[_SCT]: ... @overload def loadtxt( - fname: StrPath | Iterable[str] | Iterable[bytes], + fname: _FName, dtype: DTypeLike, - comments: None | str | Sequence[str] = ..., - delimiter: None | str = ..., - converters: None | Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] = ..., - skiprows: int = ..., - usecols: int | Sequence[int] | None = ..., - unpack: bool = ..., - ndmin: L[0, 1, 2] = ..., - encoding: None | str = ..., - max_rows: None | int = ..., + comments: str | Sequence[str] | None = "#", + delimiter: str | None = None, + converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, + skiprows: int = 0, + usecols: int | Sequence[int] | None = None, + unpack: bool = False, + ndmin: L[0, 1, 2] = 0, + encoding: str | None = None, + max_rows: int | None = None, *, - quotechar: None | str = ..., - like: None | _SupportsArrayFunc = ... + quotechar: str | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... 
def savetxt( - fname: StrPath | SupportsWrite[str] | SupportsWrite[bytes], + fname: _FNameWrite, X: ArrayLike, - fmt: str | Sequence[str] = ..., - delimiter: str = ..., - newline: str = ..., - header: str = ..., - footer: str = ..., - comments: str = ..., - encoding: None | str = ..., + fmt: str | Sequence[str] = "%.18e", + delimiter: str = " ", + newline: str = "\n", + header: str = "", + footer: str = "", + comments: str = "# ", + encoding: str | None = None, ) -> None: ... @overload def fromregex( - file: StrPath | SupportsRead[str] | SupportsRead[bytes], + file: _FNameRead, regexp: str | bytes | Pattern[Any], dtype: _DTypeLike[_SCT], - encoding: None | str = ... + encoding: str | None = None, ) -> NDArray[_SCT]: ... @overload def fromregex( - file: StrPath | SupportsRead[str] | SupportsRead[bytes], + file: _FNameRead, regexp: str | bytes | Pattern[Any], dtype: DTypeLike, - encoding: None | str = ... + encoding: str | None = None, ) -> NDArray[Any]: ... @overload def genfromtxt( - fname: StrPath | Iterable[str] | Iterable[bytes], - dtype: None = ..., + fname: _FName, + dtype: None = None, comments: str = ..., - delimiter: None | str | int | Iterable[int] = ..., + delimiter: str | int | Iterable[int] | None = ..., skip_header: int = ..., skip_footer: int = ..., - converters: None | Mapping[int | str, Callable[[str], Any]] = ..., + converters: Mapping[int | str, Callable[[str], Any]] | None = ..., missing_values: Any = ..., filling_values: Any = ..., - usecols: None | Sequence[int] = ..., - names: L[None, True] | str | Collection[str] = ..., - excludelist: None | Sequence[str] = ..., + usecols: Sequence[int] | None = ..., + names: L[True] | str | Collection[str] | None = ..., + excludelist: Sequence[str] | None = ..., deletechars: str = ..., replace_space: str = ..., autostrip: bool = ..., - case_sensitive: bool | L['upper', 'lower'] = ..., + case_sensitive: bool | L["upper", "lower"] = ..., defaultfmt: str = ..., - unpack: None | bool = ..., + unpack: bool | None 
= ..., usemask: bool = ..., loose: bool = ..., invalid_raise: bool = ..., - max_rows: None | int = ..., + max_rows: int | None = ..., encoding: str = ..., *, ndmin: L[0, 1, 2] = ..., - like: None | _SupportsArrayFunc = ..., + like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... @overload def genfromtxt( - fname: StrPath | Iterable[str] | Iterable[bytes], + fname: _FName, dtype: _DTypeLike[_SCT], comments: str = ..., - delimiter: None | str | int | Iterable[int] = ..., + delimiter: str | int | Iterable[int] | None = ..., skip_header: int = ..., skip_footer: int = ..., - converters: None | Mapping[int | str, Callable[[str], Any]] = ..., + converters: Mapping[int | str, Callable[[str], Any]] | None = ..., missing_values: Any = ..., filling_values: Any = ..., - usecols: None | Sequence[int] = ..., - names: L[None, True] | str | Collection[str] = ..., - excludelist: None | Sequence[str] = ..., + usecols: Sequence[int] | None = ..., + names: L[True] | str | Collection[str] | None = ..., + excludelist: Sequence[str] | None = ..., deletechars: str = ..., replace_space: str = ..., autostrip: bool = ..., - case_sensitive: bool | L['upper', 'lower'] = ..., + case_sensitive: bool | L["upper", "lower"] = ..., defaultfmt: str = ..., - unpack: None | bool = ..., + unpack: bool | None = ..., usemask: bool = ..., loose: bool = ..., invalid_raise: bool = ..., - max_rows: None | int = ..., + max_rows: int | None = ..., encoding: str = ..., *, ndmin: L[0, 1, 2] = ..., - like: None | _SupportsArrayFunc = ..., + like: _SupportsArrayFunc | None = ..., ) -> NDArray[_SCT]: ... 
@overload def genfromtxt( - fname: StrPath | Iterable[str] | Iterable[bytes], + fname: _FName, dtype: DTypeLike, comments: str = ..., - delimiter: None | str | int | Iterable[int] = ..., + delimiter: str | int | Iterable[int] | None = ..., skip_header: int = ..., skip_footer: int = ..., - converters: None | Mapping[int | str, Callable[[str], Any]] = ..., + converters: Mapping[int | str, Callable[[str], Any]] | None = ..., missing_values: Any = ..., filling_values: Any = ..., - usecols: None | Sequence[int] = ..., - names: L[None, True] | str | Collection[str] = ..., - excludelist: None | Sequence[str] = ..., + usecols: Sequence[int] | None = ..., + names: L[True] | str | Collection[str] | None = ..., + excludelist: Sequence[str] | None = ..., deletechars: str = ..., replace_space: str = ..., autostrip: bool = ..., - case_sensitive: bool | L['upper', 'lower'] = ..., + case_sensitive: bool | L["upper", "lower"] = ..., defaultfmt: str = ..., - unpack: None | bool = ..., + unpack: bool | None = ..., usemask: bool = ..., loose: bool = ..., invalid_raise: bool = ..., - max_rows: None | int = ..., + max_rows: int | None = ..., encoding: str = ..., *, ndmin: L[0, 1, 2] = ..., - like: None | _SupportsArrayFunc = ..., + like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... @overload -def recfromtxt( - fname: StrPath | Iterable[str] | Iterable[bytes], - *, - usemask: L[False] = ..., - **kwargs: Any, -) -> recarray[Any, dtype[record]]: ... +def recfromtxt(fname: _FName, *, usemask: L[False] = False, **kwargs: object) -> np.recarray[Any, np.dtype[np.record]]: ... @overload -def recfromtxt( - fname: StrPath | Iterable[str] | Iterable[bytes], - *, - usemask: L[True], - **kwargs: Any, -) -> MaskedRecords[Any, dtype[void]]: ... +def recfromtxt(fname: _FName, *, usemask: L[True], **kwargs: object) -> MaskedRecords[Any, np.dtype[np.void]]: ... 
@overload -def recfromcsv( - fname: StrPath | Iterable[str] | Iterable[bytes], - *, - usemask: L[False] = ..., - **kwargs: Any, -) -> recarray[Any, dtype[record]]: ... +def recfromcsv(fname: _FName, *, usemask: L[False] = False, **kwargs: object) -> np.recarray[Any, np.dtype[np.record]]: ... @overload -def recfromcsv( - fname: StrPath | Iterable[str] | Iterable[bytes], - *, - usemask: L[True], - **kwargs: Any, -) -> MaskedRecords[Any, dtype[void]]: ... +def recfromcsv(fname: _FName, *, usemask: L[True], **kwargs: object) -> MaskedRecords[Any, np.dtype[np.void]]: ... diff --git a/numpy/lib/_shape_base_impl.pyi b/numpy/lib/_shape_base_impl.pyi index 5439c533edff..77e5d2de9cb9 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -10,20 +10,13 @@ from typing import ( type_check_only, ) +from typing_extensions import deprecated + import numpy as np -from numpy import ( - generic, - integer, - ufunc, - unsignedinteger, - signedinteger, - floating, - complexfloating, - object_, -) -from numpy._core.shape_base import vstack as row_stack +from numpy import _CastingKind, generic, integer, ufunc, unsignedinteger, signedinteger, floating, complexfloating, object_ from numpy._typing import ( ArrayLike, + DTypeLike, NDArray, _ShapeLike, _ArrayLike, @@ -72,6 +65,8 @@ class _SupportsArrayWrap(Protocol): @property def __array_wrap__(self) -> _ArrayWrap: ... +### + def take_along_axis( arr: _SCT | NDArray[_SCT], indices: NDArray[integer[Any]], @@ -119,6 +114,16 @@ def expand_dims( axis: _ShapeLike, ) -> NDArray[Any]: ... +# Deprecated in NumPy 2.0, 2023-08-18 +@deprecated("`row_stack` alias is deprecated. Use `np.vstack` directly.") +def row_stack( + tup: Sequence[ArrayLike], + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind", +) -> NDArray[Any]: ... + +# @overload def column_stack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ... 
@overload diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index e748e91fb908..5d3ea54511b8 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -10,7 +10,6 @@ from typing import ( import numpy as np from numpy import ( generic, - number, timedelta64, datetime64, int_, @@ -56,14 +55,28 @@ __all__ = [ "triu_indices_from", ] +### + _T = TypeVar("_T") _SCT = TypeVar("_SCT", bound=generic) +_SCT_complex = TypeVar("_SCT_complex", bound=np.complexfloating) +_SCT_inexact = TypeVar("_SCT_inexact", bound=np.inexact) +_SCT_number_co = TypeVar("_SCT_number_co", bound=_Number_co) # The returned arrays dtype must be compatible with `np.equal` -_MaskFunc: TypeAlias = Callable[ - [NDArray[int_], _T], - NDArray[number[Any] | np.bool | timedelta64 | datetime64 | object_], -] +_MaskFunc: TypeAlias = Callable[[NDArray[int_], _T], NDArray[_Number_co | timedelta64 | datetime64 | object_]] + +_Int_co: TypeAlias = np.integer | np.bool +_Float_co: TypeAlias = np.floating | _Int_co +_Number_co: TypeAlias = np.number | np.bool + +_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_SCT]] | Sequence[_SCT] +_ArrayLike1DInt_co: TypeAlias = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] +_ArrayLike1DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] +_ArrayLike2DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[_ArrayLike1DFloat_co] +_ArrayLike1DNumber_co: TypeAlias = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] + +### @overload def fliplr(m: _ArrayLike[_SCT]) -> NDArray[_SCT]: ... @@ -87,13 +100,24 @@ def eye( like: None | _SupportsArrayFunc = ..., ) -> NDArray[float64]: ... @overload +def eye( + N: int, + M: None | int, + k: int, + dtype: _DTypeLike[_SCT], + order: _OrderCF = ..., + *, + device: None | L["cpu"] = ..., + like: None | _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... 
+@overload def eye( N: int, M: None | int = ..., k: int = ..., - dtype: _DTypeLike[_SCT] = ..., - order: _OrderCF = ..., *, + dtype: _DTypeLike[_SCT], + order: _OrderCF = ..., device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., ) -> NDArray[_SCT]: ... @@ -129,12 +153,21 @@ def tri( like: None | _SupportsArrayFunc = ... ) -> NDArray[float64]: ... @overload +def tri( + N: int, + M: None | int, + k: int, + dtype: _DTypeLike[_SCT], + *, + like: None | _SupportsArrayFunc = ... +) -> NDArray[_SCT]: ... +@overload def tri( N: int, M: None | int = ..., k: int = ..., - dtype: _DTypeLike[_SCT] = ..., *, + dtype: _DTypeLike[_SCT], like: None | _SupportsArrayFunc = ... ) -> NDArray[_SCT]: ... @overload @@ -148,14 +181,14 @@ def tri( ) -> NDArray[Any]: ... @overload -def tril(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ... +def tril(m: _ArrayLike[_SCT], k: int = 0) -> NDArray[_SCT]: ... @overload -def tril(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... +def tril(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... @overload -def triu(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ... +def triu(m: _ArrayLike[_SCT], k: int = 0) -> NDArray[_SCT]: ... @overload -def triu(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... +def triu(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... @overload def vander( # type: ignore[misc] @@ -182,38 +215,6 @@ def vander( increasing: bool = ..., ) -> NDArray[object_]: ... 
- -_Int_co: TypeAlias = np.integer[Any] | np.bool -_Float_co: TypeAlias = np.floating[Any] | _Int_co -_Number_co: TypeAlias = np.number[Any] | np.bool - -_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_SCT]] | Sequence[_SCT] -_ArrayLike2D: TypeAlias = ( - _SupportsArray[np.dtype[_SCT]] - | Sequence[_ArrayLike1D[_SCT]] -) - -_ArrayLike1DInt_co: TypeAlias = ( - _SupportsArray[np.dtype[_Int_co]] - | Sequence[int | _Int_co] -) -_ArrayLike1DFloat_co: TypeAlias = ( - _SupportsArray[np.dtype[_Float_co]] - | Sequence[float | int | _Float_co] -) -_ArrayLike2DFloat_co: TypeAlias = ( - _SupportsArray[np.dtype[_Float_co]] - | Sequence[_ArrayLike1DFloat_co] -) -_ArrayLike1DNumber_co: TypeAlias = ( - _SupportsArray[np.dtype[_Number_co]] - | Sequence[int | float | complex | _Number_co] -) - -_SCT_complex = TypeVar("_SCT_complex", bound=np.complexfloating[Any, Any]) -_SCT_inexact = TypeVar("_SCT_inexact", bound=np.inexact[Any]) -_SCT_number_co = TypeVar("_SCT_number_co", bound=_Number_co) - @overload def histogram2d( x: _ArrayLike1D[_SCT_complex], @@ -344,7 +345,6 @@ def histogram2d( NDArray[_SCT_number_co | complex128 | float64], NDArray[_SCT_number_co | complex128 | float64] , ]: ... 
- @overload def histogram2d( x: _ArrayLike1DNumber_co, diff --git a/numpy/lib/_user_array_impl.pyi b/numpy/lib/_user_array_impl.pyi new file mode 100644 index 000000000000..d5dfb0573c71 --- /dev/null +++ b/numpy/lib/_user_array_impl.pyi @@ -0,0 +1,220 @@ +from types import EllipsisType +from typing import Any, Generic, SupportsIndex, TypeAlias, TypeVar, overload + +from _typeshed import Incomplete +from typing_extensions import Self, deprecated, override + +import numpy as np +import numpy.typing as npt +from numpy._typing import _ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co, _DTypeLike + +### + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) +_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=Any, covariant=True) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype[Any]) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype[Any], default=np.dtype[Any], covariant=True) + +_BoolArrayT = TypeVar("_BoolArrayT", bound=container[Any, np.dtype[np.bool]]) +_IntegralArrayT = TypeVar("_IntegralArrayT", bound=container[Any, np.dtype[np.bool | np.integer | np.object_]]) +_RealContainerT = TypeVar( + "_RealContainerT", + bound=container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]], +) +_NumericContainerT = TypeVar("_NumericContainerT", bound=container[Any, np.dtype[np.number | np.timedelta64 | np.object_]]) + +_ArrayInt_co: TypeAlias = npt.NDArray[np.integer | np.bool] + +_ToIndexSlice: TypeAlias = slice | EllipsisType | _ArrayInt_co | None +_ToIndexSlices: TypeAlias = _ToIndexSlice | tuple[_ToIndexSlice, ...] +_ToIndex: TypeAlias = SupportsIndex | _ToIndexSlice +_ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...] 
+ +### + +class container(Generic[_ShapeT_co, _DTypeT_co]): + array: np.ndarray[_ShapeT_co, _DTypeT_co] + + @overload + def __init__( + self, + /, + data: container[_ShapeT_co, _DTypeT_co] | np.ndarray[_ShapeT_co, _DTypeT_co], + dtype: None = None, + copy: bool = True, + ) -> None: ... + @overload + def __init__( + self: container[Any, np.dtype[_ScalarT]], + /, + data: _ArrayLike[_ScalarT], + dtype: None = None, + copy: bool = True, + ) -> None: ... + @overload + def __init__( + self: container[Any, np.dtype[_ScalarT]], + /, + data: npt.ArrayLike, + dtype: _DTypeLike[_ScalarT], + copy: bool = True, + ) -> None: ... + @overload + def __init__(self, /, data: npt.ArrayLike, dtype: npt.DTypeLike | None = None, copy: bool = True) -> None: ... + + # + def __complex__(self, /) -> complex: ... + def __float__(self, /) -> float: ... + def __int__(self, /) -> int: ... + def __hex__(self, /) -> str: ... + def __oct__(self, /) -> str: ... + + # + @override + def __eq__(self, other: object, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + @override + def __ne__(self, other: object, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + + # + def __lt__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + def __le__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + def __gt__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + def __ge__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + + # + def __len__(self, /) -> int: ... + + # keep in sync with np.ndarray + @overload + def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> container[_ShapeT_co, _DTypeT_co]: ... + @overload + def __getitem__(self, key: _ToIndexSlices, /) -> container[Any, _DTypeT_co]: ... 
+ @overload + def __getitem__(self, key: _ToIndices, /) -> Any: ... + @overload + def __getitem__(self: container[Any, np.dtype[np.void]], key: list[str], /) -> container[_ShapeT_co, np.dtype[np.void]]: ... + @overload + def __getitem__(self: container[Any, np.dtype[np.void]], key: str, /) -> container[_ShapeT_co, np.dtype[Any]]: ... + + # keep in sync with np.ndarray + @overload + def __setitem__(self, index: _ToIndices, value: object, /) -> None: ... + @overload + def __setitem__(self: container[Any, np.dtype[np.void]], key: str | list[str], value: object, /) -> None: ... + + # keep in sync with np.ndarray + @overload + def __abs__(self: container[_ShapeT, np.dtype[np.complex64]], /) -> container[_ShapeT, np.dtype[np.float32]]: ... # type: ignore[overload-overlap] + @overload + def __abs__(self: container[_ShapeT, np.dtype[np.complex128]], /) -> container[_ShapeT, np.dtype[np.float64]]: ... + @overload + def __abs__(self: container[_ShapeT, np.dtype[np.complex192]], /) -> container[_ShapeT, np.dtype[np.float96]]: ... + @overload + def __abs__(self: container[_ShapeT, np.dtype[np.complex256]], /) -> container[_ShapeT, np.dtype[np.float128]]: ... + @overload + def __abs__(self: _RealContainerT, /) -> _RealContainerT: ... + + # + def __neg__(self: _NumericContainerT, /) -> _NumericContainerT: ... # noqa: PYI019 + def __pos__(self: _NumericContainerT, /) -> _NumericContainerT: ... # noqa: PYI019 + def __invert__(self: _IntegralArrayT, /) -> _IntegralArrayT: ... # noqa: PYI019 + + # TODO(jorenham): complete these binary ops + + # + def __add__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __radd__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __iadd__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __sub__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rsub__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __isub__(self, other: npt.ArrayLike, /) -> Self: ... 
+ + # + def __mul__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rmul__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __imul__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __div__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rdiv__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __idiv__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __mod__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rmod__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __imod__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __divmod__(self, other: npt.ArrayLike, /) -> tuple[Incomplete, Incomplete]: ... + def __rdivmod__(self, other: npt.ArrayLike, /) -> tuple[Incomplete, Incomplete]: ... + + # + def __pow__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rpow__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __ipow__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __lshift__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.integer]]: ... + def __rlshift__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.integer]]: ... + def __ilshift__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + def __rshift__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.integer]]: ... + def __rrshift__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.integer]]: ... + def __irshift__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __and__(self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, /) -> container[Any, np.dtype[np.bool]]: ... + @overload + def __and__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.bool | np.integer]]: ... + __rand__ = __and__ + @overload + def __iand__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __iand__(self, other: _ArrayLikeInt_co, /) -> Self: ... 
+ + # + @overload + def __xor__(self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, /) -> container[Any, np.dtype[np.bool]]: ... + @overload + def __xor__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.bool | np.integer]]: ... + __rxor__ = __xor__ + @overload + def __ixor__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __ixor__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __or__(self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, /) -> container[Any, np.dtype[np.bool]]: ... + @overload + def __or__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.bool | np.integer]]: ... + __ror__ = __or__ + @overload + def __ior__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __ior__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __array__(self, /, t: None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __array__(self, /, t: _DTypeT) -> np.ndarray[_ShapeT_co, _DTypeT]: ... + + # + @overload + def __array_wrap__(self, arg0: npt.ArrayLike, /) -> container[_ShapeT_co, _DTypeT_co]: ... + @overload + def __array_wrap__(self, a: np.ndarray[_ShapeT, _DTypeT], c: Any = ..., s: Any = ..., /) -> container[_ShapeT, _DTypeT]: ... + + # + def copy(self, /) -> Self: ... + @deprecated("tostring() is deprecated. Use tobytes() instead.") + def tostring(self, /) -> bytes: ... + def tobytes(self, /) -> bytes: ... + def byteswap(self, /) -> Self: ... + def astype(self, /, typecode: _DTypeLike[_ScalarT]) -> container[_ShapeT_co, np.dtype[_ScalarT]]: ... diff --git a/numpy/lib/introspect.pyi b/numpy/lib/introspect.pyi new file mode 100644 index 000000000000..7929981cd636 --- /dev/null +++ b/numpy/lib/introspect.pyi @@ -0,0 +1,3 @@ +__all__ = ["opt_func_info"] + +def opt_func_info(func_name: str | None = None, signature: str | None = None) -> dict[str, dict[str, dict[str, str]]]: ... 
diff --git a/numpy/lib/npyio.pyi b/numpy/lib/npyio.pyi index c3258e88d04f..fd3ae8f5a287 100644 --- a/numpy/lib/npyio.pyi +++ b/numpy/lib/npyio.pyi @@ -1,4 +1,5 @@ from numpy.lib._npyio_impl import ( DataSource as DataSource, NpzFile as NpzFile, + __doc__ as __doc__, ) diff --git a/numpy/lib/recfunctions.pyi b/numpy/lib/recfunctions.pyi new file mode 100644 index 000000000000..442530e9cd39 --- /dev/null +++ b/numpy/lib/recfunctions.pyi @@ -0,0 +1,435 @@ +from collections.abc import Callable, Iterable, Mapping, Sequence +from typing import Any, Literal, TypeAlias, overload + +from _typeshed import Incomplete +from typing_extensions import TypeVar + +import numpy as np +import numpy.typing as npt +from numpy._typing import _DTypeLike, _DTypeLikeVoid +from numpy.ma.mrecords import MaskedRecords + +__all__ = [ + "append_fields", + "apply_along_fields", + "assign_fields_by_name", + "drop_fields", + "find_duplicates", + "flatten_descr", + "get_fieldstructure", + "get_names", + "get_names_flat", + "join_by", + "merge_arrays", + "rec_append_fields", + "rec_drop_fields", + "rec_join", + "recursive_fill_fields", + "rename_fields", + "repack_fields", + "require_fields", + "stack_arrays", + "structured_to_unstructured", + "unstructured_to_structured", +] + +_T = TypeVar("_T") +_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype[Any]) +_ArrayT = TypeVar("_ArrayT", bound=npt.NDArray[Any]) +_VoidArrayT = TypeVar("_VoidArrayT", bound=npt.NDArray[np.void]) +_NonVoidDTypeT = TypeVar("_NonVoidDTypeT", bound=_NonVoidDType) + +_OneOrMany: TypeAlias = _T | Iterable[_T] +_BuiltinSequence: TypeAlias = tuple[_T, ...] | list[_T] + +_NestedNames: TypeAlias = tuple[str | _NestedNames, ...] 
+_NonVoid: TypeAlias = np.bool | np.number | np.character | np.datetime64 | np.timedelta64 | np.object_ +_NonVoidDType: TypeAlias = np.dtype[_NonVoid] | np.dtypes.StringDType + +_JoinType: TypeAlias = Literal["inner", "outer", "leftouter"] + +### + +def recursive_fill_fields(input: npt.NDArray[np.void], output: _VoidArrayT) -> _VoidArrayT: ... + +# +def get_names(adtype: np.dtype[np.void]) -> _NestedNames: ... +def get_names_flat(adtype: np.dtype[np.void]) -> tuple[str, ...]: ... + +# +@overload +def flatten_descr(ndtype: _NonVoidDTypeT) -> tuple[tuple[Literal[""], _NonVoidDTypeT]]: ... +@overload +def flatten_descr(ndtype: np.dtype[np.void]) -> tuple[tuple[str, np.dtype[Any]]]: ... + +# +def get_fieldstructure( + adtype: np.dtype[np.void], + lastname: str | None = None, + parents: dict[str, list[str]] | None = None, +) -> dict[str, list[str]]: ... + +# +@overload +def merge_arrays( + seqarrays: Sequence[np.ndarray[_ShapeT, np.dtype[Any]]] | np.ndarray[_ShapeT, np.dtype[Any]], + fill_value: float = -1, + flatten: bool = False, + usemask: bool = False, + asrecarray: bool = False, +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def merge_arrays( + seqarrays: Sequence[npt.ArrayLike] | np.void, + fill_value: float = -1, + flatten: bool = False, + usemask: bool = False, + asrecarray: bool = False, +) -> np.recarray[Any, np.dtype[np.void]]: ... + +# +@overload +def drop_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], + usemask: bool = True, + asrecarray: Literal[False] = False, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def drop_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], + usemask: bool, + asrecarray: Literal[True], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... 
+@overload +def drop_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], + usemask: bool = True, + *, + asrecarray: Literal[True], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... + +# +@overload +def rename_fields( + base: MaskedRecords[_ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... +@overload +def rename_fields( + base: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +@overload +def rename_fields( + base: np.recarray[_ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def rename_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... + +# +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype[Any]] | None, + fill_value: int, + usemask: Literal[False], + asrecarray: Literal[False] = False, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype[Any]] | None = None, + fill_value: int = -1, + *, + usemask: Literal[False], + asrecarray: Literal[False] = False, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype[Any]] | None, + fill_value: int, + usemask: Literal[False], + asrecarray: Literal[True], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... 
+@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype[Any]] | None = None, + fill_value: int = -1, + *, + usemask: Literal[False], + asrecarray: Literal[True], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype[Any]] | None = None, + fill_value: int = -1, + usemask: Literal[True] = True, + asrecarray: Literal[False] = False, +) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype[Any]] | None, + fill_value: int, + usemask: Literal[True], + asrecarray: Literal[True], +) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype[Any]] | None = None, + fill_value: int = -1, + usemask: Literal[True] = True, + *, + asrecarray: Literal[True], +) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... + +# +def rec_drop_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... + +# +def rec_append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype[Any]] | None = None, +) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... + +# TODO(jorenham): Stop passing `void` directly once structured dtypes are implemented, +# e.g. using a `TypeVar` with constraints. 
+# https://github.com/numpy/numtype/issues/92 +@overload +def repack_fields(a: _DTypeT, align: bool = False, recurse: bool = False) -> _DTypeT: ... +@overload +def repack_fields(a: _ScalarT, align: bool = False, recurse: bool = False) -> _ScalarT: ... +@overload +def repack_fields(a: _ArrayT, align: bool = False, recurse: bool = False) -> _ArrayT: ... + +# TODO(jorenham): Attempt shape-typing (return type has ndim == arr.ndim + 1) +@overload +def structured_to_unstructured( + arr: npt.NDArray[np.void], + dtype: _DTypeLike[_ScalarT], + copy: bool = False, + casting: np._CastingKind = "unsafe", +) -> npt.NDArray[_ScalarT]: ... +@overload +def structured_to_unstructured( + arr: npt.NDArray[np.void], + dtype: npt.DTypeLike | None = None, + copy: bool = False, + casting: np._CastingKind = "unsafe", +) -> npt.NDArray[Any]: ... + +# +@overload +def unstructured_to_structured( + arr: npt.NDArray[Any], + dtype: npt.DTypeLike, + names: None = None, + align: bool = False, + copy: bool = False, + casting: str = "unsafe", +) -> npt.NDArray[np.void]: ... +@overload +def unstructured_to_structured( + arr: npt.NDArray[Any], + dtype: None, + names: _OneOrMany[str], + align: bool = False, + copy: bool = False, + casting: str = "unsafe", +) -> npt.NDArray[np.void]: ... + +# +def apply_along_fields( + func: Callable[[np.ndarray[_ShapeT, Any]], npt.NDArray[Any]], + arr: np.ndarray[_ShapeT, np.dtype[np.void]], +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... + +# +def assign_fields_by_name(dst: npt.NDArray[np.void], src: npt.NDArray[np.void], zero_unassigned: bool = True) -> None: ... + +# +def require_fields( + array: np.ndarray[_ShapeT, np.dtype[np.void]], + required_dtype: _DTypeLikeVoid, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... + +# TODO(jorenham): Attempt shape-typing +@overload +def stack_arrays( + arrays: _ArrayT, + defaults: Mapping[str, object] | None = None, + usemask: bool = True, + asrecarray: bool = False, + autoconvert: bool = False, +) -> _ArrayT: ... 
+@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None, + usemask: Literal[False], + asrecarray: Literal[False] = False, + autoconvert: bool = False, +) -> npt.NDArray[np.void]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[False] = False, + autoconvert: bool = False, +) -> npt.NDArray[np.void]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[True], + autoconvert: bool = False, +) -> np.recarray[tuple[int, ...], np.dtype[np.void]]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + usemask: Literal[True] = True, + asrecarray: Literal[False] = False, + autoconvert: bool = False, +) -> np.ma.MaskedArray[tuple[int, ...], np.dtype[np.void]]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None, + usemask: Literal[True], + asrecarray: Literal[True], + autoconvert: bool = False, +) -> MaskedRecords[tuple[int, ...], np.dtype[np.void]]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + usemask: Literal[True] = True, + *, + asrecarray: Literal[True], + autoconvert: bool = False, +) -> MaskedRecords[tuple[int, ...], np.dtype[np.void]]: ... + +# +@overload +def find_duplicates( + a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], + key: str | None = None, + ignoremask: bool = True, + return_index: Literal[False] = False, +) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... 
+@overload +def find_duplicates( + a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], + key: str | None, + ignoremask: bool, + return_index: Literal[True], +) -> tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ... +@overload +def find_duplicates( + a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], + key: str | None = None, + ignoremask: bool = True, + *, + return_index: Literal[True], +) -> tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ... + +# +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[False] = False, +) -> np.ndarray[tuple[int], np.dtype[np.void]]: ... +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[True], +) -> np.recarray[tuple[int], np.dtype[np.void]]: ... +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + usemask: Literal[True] = True, + asrecarray: Literal[False] = False, +) -> np.ma.MaskedArray[tuple[int], np.dtype[np.void]]: ... +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + usemask: Literal[True] = True, + *, + asrecarray: Literal[True], +) -> MaskedRecords[tuple[int], np.dtype[np.void]]: ... 
+ +# +def rec_join( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, +) -> np.recarray[tuple[int], np.dtype[np.void]]: ... diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index f237dffbc244..0cac8819f5fd 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -283,7 +283,7 @@ import numpy as np from numpy.testing import ( assert_, assert_array_equal, assert_raises, assert_raises_regex, - assert_warns, IS_PYPY, IS_WASM + assert_warns, IS_PYPY, IS_WASM, IS_64BIT ) from numpy.testing._private.utils import requires_memory from numpy.lib import format @@ -927,8 +927,7 @@ def test_large_file_support(tmpdir): @pytest.mark.skipif(IS_PYPY, reason="flaky on PyPy") -@pytest.mark.skipif(np.dtype(np.intp).itemsize < 8, - reason="test requires 64-bit system") +@pytest.mark.skipif(not IS_64BIT, reason="test requires 64-bit system") @pytest.mark.slow @requires_memory(free_bytes=2 * 2**30) def test_large_archive(tmpdir): diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index ed59a4a86181..c97ef92a7889 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2925,6 +2925,27 @@ def test_error_not_1d(self, vals): with assert_raises(ValueError): np.bincount(vals) + @pytest.mark.parametrize("dt", np.typecodes["AllInteger"]) + def test_gh_28354(self, dt): + a = np.array([0, 1, 1, 3, 2, 1, 7], dtype=dt) + actual = np.bincount(a) + expected = [1, 3, 1, 1, 0, 0, 0, 1] + assert_array_equal(actual, expected) + + def test_contiguous_handling(self): + # check for absence of hard crash + np.bincount(np.arange(10000)[::2]) + + def test_gh_28354_array_like(self): + class A: + def __array__(self): + return np.array([0, 1, 1, 3, 2, 1, 7], dtype=np.uint64) + + a = A() + actual = np.bincount(a) + 
expected = [1, 3, 1, 1, 0, 0, 0, 1] + assert_array_equal(actual, expected) + class TestInterp: diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 44aac93db1ff..742915e22ef0 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -2796,8 +2796,10 @@ def test_load_multiple_arrays_until_eof(): np.save(f, 1) np.save(f, 2) f.seek(0) - assert np.load(f) == 1 - assert np.load(f) == 2 + out1 = np.load(f) + assert out1 == 1 + out2 = np.load(f) + assert out2 == 2 with pytest.raises(EOFError): np.load(f) diff --git a/numpy/lib/tests/test_loadtxt.py b/numpy/lib/tests/test_loadtxt.py index 116cd1608da3..60717be3bd9a 100644 --- a/numpy/lib/tests/test_loadtxt.py +++ b/numpy/lib/tests/test_loadtxt.py @@ -1073,3 +1073,28 @@ def test_maxrows_exceeding_chunksize(nmax): res = np.loadtxt(fname, dtype=str, delimiter=" ", max_rows=nmax) os.remove(fname) assert len(res) == nmax + +@pytest.mark.parametrize("nskip", (0, 10000, 12345, 50000, 67891, 100000)) +def test_skiprow_exceeding_maxrows_exceeding_chunksize(tmpdir, nskip): + # tries to read a file in chunks by skipping a variable amount of lines, + # less, equal, greater than max_rows + file_length = 110000 + data = "\n".join(f"{i} a 0.5 1" for i in range(1, file_length + 1)) + expected_length = min(60000, file_length - nskip) + expected = np.arange(nskip + 1, nskip + 1 + expected_length).astype(str) + + # file-like path + txt = StringIO(data) + res = np.loadtxt(txt, dtype='str', delimiter=" ", skiprows=nskip, max_rows=60000) + assert len(res) == expected_length + # are the right lines read in res? + assert_array_equal(expected, res[:, 0]) + + # file-obj path + tmp_file = tmpdir / "test_data.txt" + tmp_file.write(data) + fname = str(tmp_file) + res = np.loadtxt(fname, dtype='str', delimiter=" ", skiprows=nskip, max_rows=60000) + assert len(res) == expected_length + # are the right lines read in res? 
+ assert_array_equal(expected, res[:, 0]) diff --git a/numpy/lib/user_array.pyi b/numpy/lib/user_array.pyi new file mode 100644 index 000000000000..9b90d893326b --- /dev/null +++ b/numpy/lib/user_array.pyi @@ -0,0 +1 @@ +from ._user_array_impl import container as container diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index d3ca3eb701b7..9f646ec94037 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -16,7 +16,6 @@ from numpy import ( vecdot, # other - generic, floating, complexfloating, signedinteger, @@ -79,13 +78,13 @@ __all__ = [ "vecdot", ] -_T = TypeVar("_T") _ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) -_SCT2 = TypeVar("_SCT2", bound=generic, covariant=True) -_2Tuple: TypeAlias = tuple[_T, _T] _ModeKind: TypeAlias = L["reduced", "complete", "r", "raw"] +### + +fortran_int = np.intc class EigResult(NamedTuple): eigenvalues: NDArray[Any] @@ -176,11 +175,11 @@ def matrix_power( ) -> NDArray[Any]: ... @overload -def cholesky(a: _ArrayLikeInt_co) -> NDArray[float64]: ... +def cholesky(a: _ArrayLikeInt_co, /, *, upper: bool = False) -> NDArray[float64]: ... @overload -def cholesky(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... +def cholesky(a: _ArrayLikeFloat_co, /, *, upper: bool = False) -> NDArray[floating[Any]]: ... @overload -def cholesky(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... +def cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> NDArray[complexfloating[Any, Any]]: ... @overload def outer(x1: _ArrayLikeUnknown, x2: _ArrayLikeUnknown) -> NDArray[Any]: ... @@ -373,12 +372,16 @@ def norm( @overload def matrix_norm( x: ArrayLike, + /, + *, ord: None | float | L["fro", "nuc"] = ..., keepdims: bool = ..., ) -> floating[Any]: ... @overload def matrix_norm( x: ArrayLike, + /, + *, ord: None | float | L["fro", "nuc"] = ..., keepdims: bool = ..., ) -> Any: ... 
@@ -386,6 +389,8 @@ def matrix_norm( @overload def vector_norm( x: ArrayLike, + /, + *, axis: None = ..., ord: None | float = ..., keepdims: bool = ..., @@ -393,6 +398,8 @@ def vector_norm( @overload def vector_norm( x: ArrayLike, + /, + *, axis: SupportsInt | SupportsIndex | tuple[int, ...] = ..., ord: None | float = ..., keepdims: bool = ..., @@ -407,37 +414,49 @@ def multi_dot( def diagonal( x: ArrayLike, # >= 2D array + /, + *, offset: SupportsIndex = ..., ) -> NDArray[Any]: ... def trace( x: ArrayLike, # >= 2D array + /, + *, offset: SupportsIndex = ..., dtype: DTypeLike = ..., ) -> Any: ... @overload def cross( - a: _ArrayLikeUInt_co, - b: _ArrayLikeUInt_co, + x1: _ArrayLikeUInt_co, + x2: _ArrayLikeUInt_co, + /, + *, axis: int = ..., ) -> NDArray[unsignedinteger[Any]]: ... @overload def cross( - a: _ArrayLikeInt_co, - b: _ArrayLikeInt_co, + x1: _ArrayLikeInt_co, + x2: _ArrayLikeInt_co, + /, + *, axis: int = ..., ) -> NDArray[signedinteger[Any]]: ... @overload def cross( - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, + x1: _ArrayLikeFloat_co, + x2: _ArrayLikeFloat_co, + /, + *, axis: int = ..., ) -> NDArray[floating[Any]]: ... @overload def cross( - a: _ArrayLikeComplex_co, - b: _ArrayLikeComplex_co, + x1: _ArrayLikeComplex_co, + x2: _ArrayLikeComplex_co, + /, + *, axis: int = ..., ) -> NDArray[complexfloating[Any, Any]]: ... diff --git a/numpy/linalg/_umath_linalg.pyi b/numpy/linalg/_umath_linalg.pyi new file mode 100644 index 000000000000..cd07acdb1f9e --- /dev/null +++ b/numpy/linalg/_umath_linalg.pyi @@ -0,0 +1,61 @@ +from typing import Final +from typing import Literal as L + +import numpy as np +from numpy._typing._ufunc import _GUFunc_Nin2_Nout1 + +__version__: Final[str] = ... +_ilp64: Final[bool] = ... + +### +# 1 -> 1 + +# (m,m) -> () +det: Final[np.ufunc] = ... +# (m,m) -> (m) +cholesky_lo: Final[np.ufunc] = ... +cholesky_up: Final[np.ufunc] = ... +eigvals: Final[np.ufunc] = ... +eigvalsh_lo: Final[np.ufunc] = ... 
+eigvalsh_up: Final[np.ufunc] = ... +# (m,m) -> (m,m) +inv: Final[np.ufunc] = ... +# (m,n) -> (p) +qr_r_raw: Final[np.ufunc] = ... +svd: Final[np.ufunc] = ... + +### +# 1 -> 2 + +# (m,m) -> (), () +slogdet: Final[np.ufunc] = ... +# (m,m) -> (m), (m,m) +eig: Final[np.ufunc] = ... +eigh_lo: Final[np.ufunc] = ... +eigh_up: Final[np.ufunc] = ... + +### +# 2 -> 1 + +# (m,n), (n) -> (m,m) +qr_complete: Final[_GUFunc_Nin2_Nout1[L["qr_complete"], L[2], None, L["(m,n),(n)->(m,m)"]]] = ... +# (m,n), (k) -> (m,k) +qr_reduced: Final[_GUFunc_Nin2_Nout1[L["qr_reduced"], L[2], None, L["(m,n),(k)->(m,k)"]]] = ... +# (m,m), (m,n) -> (m,n) +solve: Final[_GUFunc_Nin2_Nout1[L["solve"], L[4], None, L["(m,m),(m,n)->(m,n)"]]] = ... +# (m,m), (m) -> (m) +solve1: Final[_GUFunc_Nin2_Nout1[L["solve1"], L[4], None, L["(m,m),(m)->(m)"]]] = ... + +### +# 1 -> 3 + +# (m,n) -> (m,m), (p), (n,n) +svd_f: Final[np.ufunc] = ... +# (m,n) -> (m,p), (p), (p,n) +svd_s: Final[np.ufunc] = ... + +### +# 3 -> 4 + +# (m,n), (m,k), () -> (n,k), (k), (), (p) +lstsq: Final[np.ufunc] = ... 
diff --git a/numpy/linalg/lapack_lite.pyi b/numpy/linalg/lapack_lite.pyi new file mode 100644 index 000000000000..0f6bfa3a022b --- /dev/null +++ b/numpy/linalg/lapack_lite.pyi @@ -0,0 +1,141 @@ +from typing import Any, Final, TypedDict, type_check_only + +import numpy as np +from numpy._typing import NDArray + +from ._linalg import fortran_int + +### + +@type_check_only +class _GELSD(TypedDict): + m: int + n: int + nrhs: int + lda: int + ldb: int + rank: int + lwork: int + info: int + +@type_check_only +class _DGELSD(_GELSD): + dgelsd_: int + rcond: float + +@type_check_only +class _ZGELSD(_GELSD): + zgelsd_: int + +@type_check_only +class _GEQRF(TypedDict): + m: int + n: int + lda: int + lwork: int + info: int + +@type_check_only +class _DGEQRF(_GEQRF): + dgeqrf_: int + +@type_check_only +class _ZGEQRF(_GEQRF): + zgeqrf_: int + +@type_check_only +class _DORGQR(TypedDict): + dorgqr_: int + info: int + +@type_check_only +class _ZUNGQR(TypedDict): + zungqr_: int + info: int + +### + +_ilp64: Final[bool] = ... + +def dgelsd( + m: int, + n: int, + nrhs: int, + a: NDArray[np.float64], + lda: int, + b: NDArray[np.float64], + ldb: int, + s: NDArray[np.float64], + rcond: float, + rank: int, + work: NDArray[np.float64], + lwork: int, + iwork: NDArray[fortran_int], + info: int, +) -> _DGELSD: ... +def zgelsd( + m: int, + n: int, + nrhs: int, + a: NDArray[np.complex128], + lda: int, + b: NDArray[np.complex128], + ldb: int, + s: NDArray[np.float64], + rcond: float, + rank: int, + work: NDArray[np.complex128], + lwork: int, + rwork: NDArray[np.float64], + iwork: NDArray[fortran_int], + info: int, +) -> _ZGELSD: ... + +# +def dgeqrf( + m: int, + n: int, + a: NDArray[np.float64], # in/out, shape: (lda, n) + lda: int, + tau: NDArray[np.float64], # out, shape: (min(m, n),) + work: NDArray[np.float64], # out, shape: (max(1, lwork),) + lwork: int, + info: int, # out +) -> _DGEQRF: ... 
+def zgeqrf( + m: int, + n: int, + a: NDArray[np.complex128], # in/out, shape: (lda, n) + lda: int, + tau: NDArray[np.complex128], # out, shape: (min(m, n),) + work: NDArray[np.complex128], # out, shape: (max(1, lwork),) + lwork: int, + info: int, # out +) -> _ZGEQRF: ... + +# +def dorgqr( + m: int, # >=0 + n: int, # m >= n >= 0 + k: int, # n >= k >= 0 + a: NDArray[np.float64], # in/out, shape: (lda, n) + lda: int, # >= max(1, m) + tau: NDArray[np.float64], # in, shape: (k,) + work: NDArray[np.float64], # out, shape: (max(1, lwork),) + lwork: int, + info: int, # out +) -> _DORGQR: ... +def zungqr( + m: int, + n: int, + k: int, + a: NDArray[np.complex128], + lda: int, + tau: NDArray[np.complex128], + work: NDArray[np.complex128], + lwork: int, + info: int, +) -> _ZUNGQR: ... + +# +def xerbla(srname: object, info: int) -> None: ... diff --git a/numpy/linalg/linalg.pyi b/numpy/linalg/linalg.pyi new file mode 100644 index 000000000000..dbe9becfb8d5 --- /dev/null +++ b/numpy/linalg/linalg.pyi @@ -0,0 +1,69 @@ +from ._linalg import ( + LinAlgError, + cholesky, + cond, + cross, + det, + diagonal, + eig, + eigh, + eigvals, + eigvalsh, + inv, + lstsq, + matmul, + matrix_norm, + matrix_power, + matrix_rank, + matrix_transpose, + multi_dot, + norm, + outer, + pinv, + qr, + slogdet, + solve, + svd, + svdvals, + tensordot, + tensorinv, + tensorsolve, + trace, + vecdot, + vector_norm, +) + +__all__ = [ + "LinAlgError", + "cholesky", + "cond", + "cross", + "det", + "diagonal", + "eig", + "eigh", + "eigvals", + "eigvalsh", + "inv", + "lstsq", + "matmul", + "matrix_norm", + "matrix_power", + "matrix_rank", + "matrix_transpose", + "multi_dot", + "norm", + "outer", + "pinv", + "qr", + "slogdet", + "solve", + "svd", + "svdvals", + "tensordot", + "tensorinv", + "tensorsolve", + "trace", + "vecdot", + "vector_norm", +] diff --git a/numpy/linalg/meson.build b/numpy/linalg/meson.build index 740c9f56c6fa..e2f8136208d6 100644 --- a/numpy/linalg/meson.build +++ b/numpy/linalg/meson.build @@ 
-45,7 +45,10 @@ py.install_sources( '__init__.pyi', '_linalg.py', '_linalg.pyi', + '_umath_linalg.pyi', + 'lapack_lite.pyi', 'linalg.py', + 'linalg.pyi', ], subdir: 'numpy/linalg' ) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index b76d090add03..97d6c9eafa5a 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -21,12 +21,12 @@ """ # pylint: disable-msg=E1002 import builtins +import functools import inspect import operator import warnings import textwrap import re -from functools import reduce from typing import Dict import numpy as np @@ -939,6 +939,7 @@ def __init__(self, ufunc): self.f = ufunc self.__doc__ = ufunc.__doc__ self.__name__ = ufunc.__name__ + self.__qualname__ = ufunc.__qualname__ def __str__(self): return f"Masked version of {self.f}" @@ -3157,7 +3158,7 @@ def __array_wrap__(self, obj, context=None, return_scalar=False): func, args, out_i = context # args sometimes contains outputs (gh-10459), which we don't want input_args = args[:func.nin] - m = reduce(mask_or, [getmaskarray(arg) for arg in input_args]) + m = functools.reduce(mask_or, [getmaskarray(arg) for arg in input_args]) # Get the domain mask domain = ufunc_domain.get(func) if domain is not None: @@ -7099,6 +7100,7 @@ class _frommethod: def __init__(self, methodname, reversed=False): self.__name__ = methodname + self.__qualname__ = methodname self.__doc__ = self.getdoc() self.reversed = reversed diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 57136fa9d31c..83c0636ce4a7 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1,19 +1,13 @@ -from collections.abc import Callable -from typing import Any, TypeVar - -from numpy import ( - amax, - amin, - bool_, - expand_dims, - clip, - indices, - squeeze, - angle, - ndarray, - dtype, - float64, -) +# pyright: reportIncompatibleMethodOverride=false +# ruff: noqa: ANN001, ANN002, ANN003, ANN201, ANN202, ANN204 + +from typing import Any, SupportsIndex, TypeVar + +from _typeshed import Incomplete +from typing_extensions import
deprecated + +from numpy import _OrderKACF, amax, amin, bool_, dtype, expand_dims, float64, ndarray +from numpy._typing import ArrayLike, _DTypeLikeBool __all__ = [ "MAError", @@ -111,8 +105,8 @@ __all__ = [ "less", "less_equal", "log", - "log10", "log2", + "log10", "logical_and", "logical_not", "logical_or", @@ -257,6 +251,7 @@ cosh: _MaskedUnaryOperation tanh: _MaskedUnaryOperation abs: _MaskedUnaryOperation absolute: _MaskedUnaryOperation +angle: _MaskedUnaryOperation fabs: _MaskedUnaryOperation negative: _MaskedUnaryOperation floor: _MaskedUnaryOperation @@ -284,20 +279,21 @@ greater_equal: _MaskedBinaryOperation less: _MaskedBinaryOperation greater: _MaskedBinaryOperation logical_and: _MaskedBinaryOperation -alltrue: _MaskedBinaryOperation +def alltrue(target: ArrayLike, axis: SupportsIndex | None = 0, dtype: _DTypeLikeBool | None = None) -> Incomplete: ... logical_or: _MaskedBinaryOperation -sometrue: Callable[..., Any] +def sometrue(target: ArrayLike, axis: SupportsIndex | None = 0, dtype: _DTypeLikeBool | None = None) -> Incomplete: ... logical_xor: _MaskedBinaryOperation bitwise_and: _MaskedBinaryOperation bitwise_or: _MaskedBinaryOperation bitwise_xor: _MaskedBinaryOperation hypot: _MaskedBinaryOperation -divide: _MaskedBinaryOperation -true_divide: _MaskedBinaryOperation -floor_divide: _MaskedBinaryOperation -remainder: _MaskedBinaryOperation -fmod: _MaskedBinaryOperation -mod: _MaskedBinaryOperation + +divide: _DomainedBinaryOperation +true_divide: _DomainedBinaryOperation +floor_divide: _DomainedBinaryOperation +remainder: _DomainedBinaryOperation +fmod: _DomainedBinaryOperation +mod: _DomainedBinaryOperation def make_mask_descr(ndtype): ... def getmask(a): ... @@ -448,10 +444,10 @@ class MaskedArray(ndarray[_ShapeType_co, _DType_co]): def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... def round(self, decimals=..., out=...): ... 
- def argsort(self, axis=..., kind=..., order=..., endwith=..., fill_value=..., stable=...): ... + def argsort(self, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... def argmin(self, axis=..., fill_value=..., out=..., *, keepdims=...): ... def argmax(self, axis=..., fill_value=..., out=..., *, keepdims=...): ... - def sort(self, axis=..., kind=..., order=..., endwith=..., fill_value=..., stable=...): ... + def sort(self, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... def min(self, axis=..., out=..., fill_value=..., keepdims=...): ... # NOTE: deprecated # def tostring(self, fill_value=..., order=...): ... @@ -460,6 +456,7 @@ class MaskedArray(ndarray[_ShapeType_co, _DType_co]): def partition(self, *args, **kwargs): ... def argpartition(self, *args, **kwargs): ... def take(self, indices, axis=..., out=..., mode=...): ... + copy: Any diagonal: Any flatten: Any @@ -468,19 +465,26 @@ class MaskedArray(ndarray[_ShapeType_co, _DType_co]): swapaxes: Any T: Any transpose: Any + @property # type: ignore[misc] def mT(self): ... - def tolist(self, fill_value=...): ... - def tobytes(self, fill_value=..., order=...): ... - def tofile(self, fid, sep=..., format=...): ... - def toflex(self): ... - torecords: Any + + # + def toflex(self) -> Incomplete: ... + def torecords(self) -> Incomplete: ... + def tolist(self, fill_value: Incomplete | None = None) -> Incomplete: ... + @deprecated("tostring() is deprecated. Use tobytes() instead.") + def tostring(self, /, fill_value: Incomplete | None = None, order: _OrderKACF = "C") -> bytes: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def tobytes(self, /, fill_value: Incomplete | None = None, order: _OrderKACF = "C") -> bytes: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def tofile(self, /, fid: Incomplete, sep: str = "", format: str = "%s") -> Incomplete: ... + + # def __reduce__(self): ... 
def __deepcopy__(self, memo=...): ... class mvoid(MaskedArray[_ShapeType_co, _DType_co]): def __new__( - self, + self, # pyright: ignore[reportSelfClsParameterName] data, mask=..., dtype=..., @@ -593,8 +597,8 @@ maximum: _extrema_operation def take(a, indices, axis=..., out=..., mode=...): ... def power(a, b, third=...): ... -def argsort(a, axis=..., kind=..., order=..., endwith=..., fill_value=..., stable=...): ... -def sort(a, axis=..., kind=..., order=..., endwith=..., fill_value=..., stable=...): ... +def argsort(a, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... +def sort(a, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... def compressed(x): ... def concatenate(arrays, axis=...): ... def diag(v, k=...): ... @@ -629,19 +633,21 @@ def asanyarray(a, dtype=...): ... def fromflex(fxarray): ... class _convert2ma: - __doc__: Any - def __init__(self, funcname, params=...): ... - def getdoc(self): ... - def __call__(self, *args, **params): ... + def __init__(self, /, funcname: str, np_ret: str, np_ma_ret: str, params: dict[str, Any] | None = None) -> None: ... + def __call__(self, /, *args: object, **params: object) -> Any: ... # noqa: ANN401 + def getdoc(self, /, np_ret: str, np_ma_ret: str) -> str | None: ... 
arange: _convert2ma +clip: _convert2ma empty: _convert2ma empty_like: _convert2ma frombuffer: _convert2ma fromfunction: _convert2ma identity: _convert2ma +indices: _convert2ma ones: _convert2ma ones_like: _convert2ma +squeeze: _convert2ma zeros: _convert2ma zeros_like: _convert2ma diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index d9d8e124d31d..bdc35c424ce3 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -249,6 +249,7 @@ class _fromnxfunction: def __init__(self, funcname): self.__name__ = funcname + self.__qualname__ = funcname self.__doc__ = self.getdoc() def getdoc(self): diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index df69cd5d3465..ba76f3517526 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -1,7 +1,10 @@ -from typing import Any +from _typeshed import Incomplete +import numpy as np +from numpy.lib._function_base_impl import average from numpy.lib._index_tricks_impl import AxisConcatenator -from .core import dot, mask_rowcols + +from .core import MaskedArray, dot __all__ = [ "apply_along_axis", @@ -17,8 +20,8 @@ __all__ = [ "compress_nd", "compress_rowcols", "compress_rows", - "count_masked", "corrcoef", + "count_masked", "cov", "diagflat", "dot", @@ -28,9 +31,9 @@ __all__ = [ "flatnotmasked_edges", "hsplit", "hstack", - "isin", "in1d", "intersect1d", + "isin", "mask_cols", "mask_rowcols", "mask_rows", @@ -46,8 +49,8 @@ __all__ = [ "setdiff1d", "setxor1d", "stack", - "unique", "union1d", + "unique", "vander", "vstack", ] @@ -57,9 +60,9 @@ def masked_all(shape, dtype = ...): ... def masked_all_like(arr): ... class _fromnxfunction: - __name__: Any - __doc__: Any - def __init__(self, funcname): ... + __name__: Incomplete + __doc__: Incomplete + def __init__(self, funcname) -> None: ... def getdoc(self): ... def __call__(self, *args, **params): ... @@ -88,7 +91,6 @@ diagflat: _fromnxfunction_single def apply_along_axis(func1d, axis, arr, *args, **kwargs): ... def apply_over_axes(func, a, axes): ... 
-def average(a, axis=..., weights=..., returned=..., keepdims=...): ... def median(a, axis=..., out=..., overwrite_input=..., keepdims=...): ... def compress_nd(x, axis=...): ... def compress_rowcols(x, axis=...): ... @@ -108,13 +110,13 @@ def cov(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ... def corrcoef(x, y=..., rowvar=..., bias = ..., allow_masked=..., ddof = ...): ... class MAxisConcatenator(AxisConcatenator): - concatenate: Any + @staticmethod + def concatenate(arrays: Incomplete, axis: int = 0) -> Incomplete: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] @classmethod - def makemat(cls, arr): ... - def __getitem__(self, key): ... + def makemat(cls, arr: Incomplete) -> Incomplete: ... # type: ignore[override] # pyright: ignore[reportIncompatibleVariableOverride] class mr_class(MAxisConcatenator): - def __init__(self): ... + def __init__(self) -> None: ... mr_: mr_class @@ -127,3 +129,6 @@ def clump_unmasked(a): ... def clump_masked(a): ... def vander(x, n=...): ... def polyfit(x, y, deg, rcond=..., full=..., w=..., cov=...): ... + +# +def mask_rowcols(a: Incomplete, axis: Incomplete | None = None) -> MaskedArray[Incomplete, np.dtype[Incomplete]]: ... diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 17fa26c351d3..53651004db9a 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -23,7 +23,7 @@ import numpy._core.umath as umath from numpy.exceptions import AxisError from numpy.testing import ( - assert_raises, assert_warns, suppress_warnings, IS_WASM + assert_raises, assert_warns, suppress_warnings, IS_WASM, temppath ) from numpy.testing._private.utils import requires_memory from numpy import ndarray @@ -1019,8 +1019,9 @@ def test_maskedarray_tofile_raises_notimplementederror(self): xm = masked_array([1, 2, 3], mask=[False, True, False]) # Test case to check the NotImplementedError. # It is not implemented at this point of time. 
We can change this in future - with pytest.raises(NotImplementedError): - np.save('xm.np', xm) + with temppath(suffix='.npy') as path: + with pytest.raises(NotImplementedError): + np.save(path, xm) class TestMaskedArrayArithmetic: diff --git a/numpy/ma/timer_comparison.py b/numpy/ma/timer_comparison.py index 9ae4c63c8e9a..9c157308fcbd 100644 --- a/numpy/ma/timer_comparison.py +++ b/numpy/ma/timer_comparison.py @@ -1,5 +1,5 @@ +import functools import timeit -from functools import reduce import numpy as np import numpy._core.fromnumeric as fromnumeric @@ -133,10 +133,10 @@ def test_1(self): xf = np.where(m1, 1.e+20, x) xm.set_fill_value(1.e+20) - assert((xm-ym).filled(0).any()) + assert (xm-ym).filled(0).any() s = x.shape - assert(xm.size == reduce(lambda x, y:x*y, s)) - assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) + assert xm.size == functools.reduce(lambda x, y: x*y, s) + assert self.count(xm) == len(m1) - functools.reduce(lambda x, y: x+y, m1) for s in [(4, 3), (6, 2)]: x.shape = s @@ -144,7 +144,7 @@ def test_1(self): xm.shape = s ym.shape = s xf.shape = s - assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) + assert self.count(xm) == len(m1) - functools.reduce(lambda x, y: x+y, m1) @np.errstate(all='ignore') def test_2(self): diff --git a/numpy/matlib.pyi b/numpy/matlib.pyi new file mode 100644 index 000000000000..c6a10c6327ef --- /dev/null +++ b/numpy/matlib.pyi @@ -0,0 +1,586 @@ +from typing import Any, Literal, TypeAlias, TypeVar, overload + +import numpy as np +import numpy.typing as npt + +# ruff: noqa: F401 +from numpy import ( + False_, + ScalarType, + True_, + __array_namespace_info__, + __version__, + abs, + absolute, + acos, + acosh, + add, + all, + allclose, + amax, + amin, + angle, + any, + append, + apply_along_axis, + apply_over_axes, + arange, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + argmax, + argmin, + argpartition, + argsort, + argwhere, + around, + array, + array2string, + 
array_equal, + array_equiv, + array_repr, + array_split, + array_str, + asanyarray, + asarray, + asarray_chkfinite, + ascontiguousarray, + asfortranarray, + asin, + asinh, + asmatrix, + astype, + atan, + atan2, + atanh, + atleast_1d, + atleast_2d, + atleast_3d, + average, + bartlett, + base_repr, + binary_repr, + bincount, + bitwise_and, + bitwise_count, + bitwise_invert, + bitwise_left_shift, + bitwise_not, + bitwise_or, + bitwise_right_shift, + bitwise_xor, + blackman, + block, + bmat, + bool, + bool_, + broadcast, + broadcast_arrays, + broadcast_shapes, + broadcast_to, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + c_, + can_cast, + cbrt, + cdouble, + ceil, + char, + character, + choose, + clip, + clongdouble, + column_stack, + common_type, + complex64, + complex128, + complex256, + complexfloating, + compress, + concat, + concatenate, + conj, + conjugate, + convolve, + copy, + copysign, + copyto, + core, + corrcoef, + correlate, + cos, + cosh, + count_nonzero, + cov, + cross, + csingle, + ctypeslib, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + datetime64, + datetime_as_string, + datetime_data, + deg2rad, + degrees, + delete, + diag, + diag_indices, + diag_indices_from, + diagflat, + diagonal, + diff, + digitize, + divide, + divmod, + dot, + double, + dsplit, + dstack, + dtype, + dtypes, + e, + ediff1d, + einsum, + einsum_path, + emath, + empty_like, + equal, + errstate, + euler_gamma, + exceptions, + exp, + exp2, + expand_dims, + expm1, + extract, + f2py, + fabs, + fft, + fill_diagonal, + finfo, + fix, + flatiter, + flatnonzero, + flexible, + flip, + fliplr, + flipud, + float16, + float32, + float64, + float128, + float_power, + floating, + floor, + floor_divide, + fmax, + fmin, + fmod, + format_float_positional, + format_float_scientific, + frexp, + from_dlpack, + frombuffer, + fromfile, + fromfunction, + fromiter, + frompyfunc, + fromregex, + fromstring, + full, + full_like, + gcd, + generic, + genfromtxt, + geomspace, + 
get_include, + get_printoptions, + getbufsize, + geterr, + geterrcall, + gradient, + greater, + greater_equal, + half, + hamming, + hanning, + heaviside, + histogram, + histogram2d, + histogram_bin_edges, + histogramdd, + hsplit, + hstack, + hypot, + i0, + iinfo, + imag, + in1d, + index_exp, + indices, + inexact, + inf, + info, + inner, + insert, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + interp, + intersect1d, + intp, + invert, + is_busday, + isclose, + iscomplex, + iscomplexobj, + isdtype, + isfinite, + isfortran, + isin, + isinf, + isnan, + isnat, + isneginf, + isposinf, + isreal, + isrealobj, + isscalar, + issubdtype, + iterable, + ix_, + kaiser, + kron, + lcm, + ldexp, + left_shift, + less, + less_equal, + lexsort, + lib, + linalg, + linspace, + little_endian, + load, + loadtxt, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + logspace, + long, + longdouble, + longlong, + ma, + mask_indices, + matmul, + matrix, + matrix_transpose, + matvec, + max, + maximum, + may_share_memory, + mean, + median, + memmap, + meshgrid, + mgrid, + min, + min_scalar_type, + minimum, + mintypecode, + mod, + modf, + moveaxis, + multiply, + nan, + nan_to_num, + nanargmax, + nanargmin, + nancumprod, + nancumsum, + nanmax, + nanmean, + nanmedian, + nanmin, + nanpercentile, + nanprod, + nanquantile, + nanstd, + nansum, + nanvar, + ndarray, + ndenumerate, + ndim, + ndindex, + nditer, + negative, + nested_iters, + newaxis, + nextafter, + nonzero, + not_equal, + number, + object_, + ogrid, + ones_like, + outer, + packbits, + pad, + partition, + percentile, + permute_dims, + pi, + piecewise, + place, + poly, + poly1d, + polyadd, + polyder, + polydiv, + polyfit, + polyint, + polymul, + polynomial, + polysub, + polyval, + positive, + pow, + power, + printoptions, + prod, + promote_types, + ptp, + put, + put_along_axis, + putmask, + quantile, + r_, + rad2deg, + radians, + random, + ravel, + 
ravel_multi_index, + real, + real_if_close, + rec, + recarray, + reciprocal, + record, + remainder, + repeat, + require, + reshape, + resize, + result_type, + right_shift, + rint, + roll, + rollaxis, + roots, + rot90, + round, + row_stack, + s_, + save, + savetxt, + savez, + savez_compressed, + sctypeDict, + searchsorted, + select, + set_printoptions, + setbufsize, + setdiff1d, + seterr, + seterrcall, + setxor1d, + shape, + shares_memory, + short, + show_config, + show_runtime, + sign, + signbit, + signedinteger, + sin, + sinc, + single, + sinh, + size, + sort, + sort_complex, + spacing, + split, + sqrt, + square, + squeeze, + stack, + std, + str_, + strings, + subtract, + sum, + swapaxes, + take, + take_along_axis, + tan, + tanh, + tensordot, + test, + testing, + tile, + timedelta64, + trace, + transpose, + trapezoid, + trapz, + tri, + tril, + tril_indices, + tril_indices_from, + trim_zeros, + triu, + triu_indices, + triu_indices_from, + true_divide, + trunc, + typecodes, + typename, + typing, + ubyte, + ufunc, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + union1d, + unique, + unique_all, + unique_counts, + unique_inverse, + unique_values, + unpackbits, + unravel_index, + unsignedinteger, + unstack, + unwrap, + ushort, + vander, + var, + vdot, + vecdot, + vecmat, + vectorize, + void, + vsplit, + vstack, + where, + zeros_like, +) +from numpy._typing import _ArrayLike, _DTypeLike + +__all__ = ["rand", "randn", "repmat"] +__all__ += np.__all__ + +### + +_T = TypeVar("_T", bound=np.generic) +_Matrix: TypeAlias = np.matrix[tuple[int, int], np.dtype[_T]] +_Order: TypeAlias = Literal["C", "F"] + +### + +# ruff: noqa: F811 + +# +@overload +def empty(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... +@overload +def empty(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... 
+@overload +def empty(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def ones(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... +@overload +def ones(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +@overload +def ones(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def zeros(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... +@overload +def zeros(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +@overload +def zeros(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def identity(n: int, dtype: None = None) -> _Matrix[np.float64]: ... +@overload +def identity(n: int, dtype: _DTypeLike[_T]) -> _Matrix[_T]: ... +@overload +def identity(n: int, dtype: npt.DTypeLike | None = None) -> _Matrix[Any]: ... + +# +@overload +def eye( + n: int, + M: int | None = None, + k: int = 0, + dtype: type[np.float64] | None = ..., + order: _Order = "C", +) -> _Matrix[np.float64]: ... +@overload +def eye(n: int, M: int | None, k: int, dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +@overload +def eye(n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +@overload +def eye(n: int, M: int | None = None, k: int = 0, dtype: npt.DTypeLike = ..., order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def rand(arg: int | tuple[()] | tuple[int] | tuple[int, int], /) -> _Matrix[np.float64]: ... +@overload +def rand(arg: int, /, *args: int) -> _Matrix[np.float64]: ... + +# +@overload +def randn(arg: int | tuple[()] | tuple[int] | tuple[int, int], /) -> _Matrix[np.float64]: ... +@overload +def randn(arg: int, /, *args: int) -> _Matrix[np.float64]: ... 
+ +# +@overload +def repmat(a: _Matrix[_T], m: int, n: int) -> _Matrix[_T]: ... +@overload +def repmat(a: _ArrayLike[_T], m: int, n: int) -> npt.NDArray[_T]: ... +@overload +def repmat(a: npt.ArrayLike, m: int, n: int) -> npt.NDArray[Any]: ... diff --git a/numpy/matrixlib/defmatrix.pyi b/numpy/matrixlib/defmatrix.pyi index 03476555e59e..a6095cc1155a 100644 --- a/numpy/matrixlib/defmatrix.pyi +++ b/numpy/matrixlib/defmatrix.pyi @@ -1,10 +1,10 @@ -from collections.abc import Sequence, Mapping +from collections.abc import Mapping, Sequence from typing import Any from numpy import matrix from numpy._typing import ArrayLike, DTypeLike, NDArray -__all__ = ["matrix", "bmat", "asmatrix"] +__all__ = ["asmatrix", "bmat", "matrix"] def bmat( obj: str | Sequence[ArrayLike] | NDArray[Any], @@ -12,6 +12,6 @@ def bmat( gdict: None | Mapping[str, Any] = ..., ) -> matrix[tuple[int, int], Any]: ... -def asmatrix(data: ArrayLike, dtype: DTypeLike = ...) -> matrix[tuple[int, int], Any]: ... - -mat = asmatrix +def asmatrix( + data: ArrayLike, dtype: DTypeLike = ... +) -> matrix[tuple[int, int], Any]: ... 
diff --git a/numpy/meson.build b/numpy/meson.build index 88c4029adae9..6fef05b9113f 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -223,6 +223,7 @@ null_dep = dependency('', required : false) atomic_dep = null_dep code_non_lockfree = ''' #include + #include int main() { struct { void *p; @@ -230,10 +231,10 @@ code_non_lockfree = ''' } x; x.p = NULL; x.u8v = 0; - uint8_t res = __atomic_load_n(x.u8v, __ATOMIC_SEQ_CST); - __atomic_store_n(x.u8v, 1, ATOMIC_SEQ_CST); - void *p = __atomic_load_n(x.p, __ATOMIC_SEQ_CST); - __atomic_store_n((void **)x.p, NULL, __ATOMIC_SEQ_CST) + uint8_t res = __atomic_load_n(&x.u8v, __ATOMIC_SEQ_CST); + __atomic_store_n(&x.u8v, 1, __ATOMIC_SEQ_CST); + void *p = __atomic_load_n((void **)x.p, __ATOMIC_SEQ_CST); + __atomic_store_n((void **)x.p, NULL, __ATOMIC_SEQ_CST); return 0; } ''' @@ -275,11 +276,15 @@ python_sources = [ '_array_api_info.py', '_array_api_info.pyi', '_configtool.py', + '_configtool.pyi', '_distributor_init.py', + '_distributor_init.pyi', '_globals.py', + '_globals.pyi', '_pytesttester.py', '_pytesttester.pyi', '_expired_attrs_2_0.py', + '_expired_attrs_2_0.pyi', 'conftest.py', 'ctypeslib.py', 'ctypeslib.pyi', @@ -288,6 +293,7 @@ python_sources = [ 'dtypes.py', 'dtypes.pyi', 'matlib.py', + 'matlib.pyi', 'py.typed', 'version.pyi', ] diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index 84b97883223d..7ed4a959625f 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -1,29 +1,17 @@ from collections.abc import Callable -from typing import Any, TypeAlias, overload, TypeVar, Literal +from typing import Any, Literal, TypeAlias, TypeVar, overload import numpy as np -from numpy import ( - dtype, - float32, - float64, - int8, - int16, - int32, - int64, - int_, - uint, - uint8, - uint16, - uint32, - uint64, -) -from numpy.random import BitGenerator, SeedSequence, RandomState +from numpy import dtype, float32, float64, int64 from numpy._typing import ( ArrayLike, + DTypeLike, 
NDArray, _ArrayLikeFloat_co, _ArrayLikeInt_co, + _BoolCodes, _DoubleCodes, + _DTypeLike, _DTypeLikeBool, _Float32Codes, _Float64Codes, @@ -32,7 +20,7 @@ from numpy._typing import ( _Int16Codes, _Int32Codes, _Int64Codes, - _IntCodes, + _IntPCodes, _ShapeLike, _SingleCodes, _SupportsDType, @@ -40,10 +28,11 @@ from numpy._typing import ( _UInt16Codes, _UInt32Codes, _UInt64Codes, - _UIntCodes, + _UIntPCodes, ) +from numpy.random import BitGenerator, RandomState, SeedSequence -_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) +_IntegerT = TypeVar("_IntegerT", bound=np.integer) _DTypeLikeFloat32: TypeAlias = ( dtype[float32] @@ -198,249 +187,296 @@ class Generator: ) -> float: ... # type: ignore[misc] @overload def beta( - self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + size: None | _ShapeLike = ... ) -> NDArray[float64]: ... @overload def exponential(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc] @overload - def exponential( - self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... + def exponential(self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...) -> NDArray[float64]: ... + + # @overload - def integers( # type: ignore[misc] + def integers( self, low: int, - high: None | int = ..., - size: None = ..., - *, - endpoint: bool = ..., - ) -> int: ... + high: int | None = None, + size: None = None, + dtype: _DTypeLike[np.int64] | _Int64Codes = ..., + endpoint: bool = False, + ) -> np.int64: ... @overload - def integers( # type: ignore[misc] + def integers( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: type[bool] = ..., - endpoint: bool = ..., + high: int | None = None, + size: None = None, + *, + dtype: type[bool], + endpoint: bool = False, ) -> bool: ... 
@overload - def integers( # type: ignore[misc] + def integers( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: type[np.bool] = ..., - endpoint: bool = ..., - ) -> np.bool: ... + high: int | None = None, + size: None = None, + *, + dtype: type[int], + endpoint: bool = False, + ) -> int: ... @overload - def integers( # type: ignore[misc] + def integers( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: type[int] = ..., - endpoint: bool = ..., - ) -> int: ... + high: int | None = None, + size: None = None, + *, + dtype: _DTypeLike[np.bool] | _BoolCodes, + endpoint: bool = False, + ) -> np.bool: ... @overload - def integers( # type: ignore[misc] + def integers( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., - endpoint: bool = ..., - ) -> uint8: ... + high: int | None = None, + size: None = None, + *, + dtype: _DTypeLike[_IntegerT], + endpoint: bool = False, + ) -> _IntegerT: ... @overload - def integers( # type: ignore[misc] + def integers( self, - low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., - endpoint: bool = ..., - ) -> uint16: ... + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: _DTypeLike[np.int64] | _Int64Codes = ..., + endpoint: bool = False, + ) -> NDArray[np.int64]: ... @overload - def integers( # type: ignore[misc] + def integers( self, - low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., - endpoint: bool = ..., - ) -> uint32: ... + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _DTypeLikeBool, + endpoint: bool = False, + ) -> NDArray[np.bool]: ... 
@overload - def integers( # type: ignore[misc] + def integers( self, - low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., - endpoint: bool = ..., - ) -> uint: ... + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _DTypeLike[_IntegerT], + endpoint: bool = False, + ) -> NDArray[_IntegerT]: ... @overload - def integers( # type: ignore[misc] + def integers( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., - endpoint: bool = ..., - ) -> uint64: ... + high: int | None = None, + size: None = None, + *, + dtype: _Int8Codes, + endpoint: bool = False, + ) -> np.int8: ... @overload - def integers( # type: ignore[misc] + def integers( self, - low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., - endpoint: bool = ..., - ) -> int8: ... + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _Int8Codes, + endpoint: bool = False, + ) -> NDArray[np.int8]: ... @overload - def integers( # type: ignore[misc] + def integers( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., - endpoint: bool = ..., - ) -> int16: ... + high: int | None = None, + size: None = None, + *, + dtype: _UInt8Codes, + endpoint: bool = False, + ) -> np.uint8: ... @overload - def integers( # type: ignore[misc] + def integers( self, - low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., - endpoint: bool = ..., - ) -> int32: ... 
+ low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _UInt8Codes, + endpoint: bool = False, + ) -> NDArray[np.uint8]: ... @overload - def integers( # type: ignore[misc] + def integers( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., - endpoint: bool = ..., - ) -> int_: ... + high: int | None = None, + size: None = None, + *, + dtype: _Int16Codes, + endpoint: bool = False, + ) -> np.int16: ... @overload - def integers( # type: ignore[misc] + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _Int16Codes, + endpoint: bool = False, + ) -> NDArray[np.int16]: ... + @overload + def integers( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., - endpoint: bool = ..., - ) -> int64: ... + high: int | None = None, + size: None = None, + *, + dtype: _UInt16Codes, + endpoint: bool = False, + ) -> np.uint16: ... @overload - def integers( # type: ignore[misc] + def integers( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, *, - endpoint: bool = ... - ) -> NDArray[int64]: ... + dtype: _UInt16Codes, + endpoint: bool = False, + ) -> NDArray[np.uint16]: ... @overload - def integers( # type: ignore[misc] + def integers( self, - low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: _DTypeLikeBool = ..., - endpoint: bool = ..., - ) -> NDArray[np.bool]: ... + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _Int32Codes, + endpoint: bool = False, + ) -> np.int32: ... 
@overload - def integers( # type: ignore[misc] + def integers( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., - endpoint: bool = ..., - ) -> NDArray[int8]: ... + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _Int32Codes, + endpoint: bool = False, + ) -> NDArray[np.int32]: ... @overload - def integers( # type: ignore[misc] + def integers( self, - low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., - endpoint: bool = ..., - ) -> NDArray[int16]: ... + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _UInt32Codes, + endpoint: bool = False, + ) -> np.uint32: ... @overload - def integers( # type: ignore[misc] + def integers( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., - endpoint: bool = ..., - ) -> NDArray[int32]: ... + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _UInt32Codes, + endpoint: bool = False, + ) -> NDArray[np.uint32]: ... @overload - def integers( # type: ignore[misc] + def integers( self, - low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: None | dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., - endpoint: bool = ..., - ) -> NDArray[int64]: ... + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _UInt64Codes, + endpoint: bool = False, + ) -> np.uint64: ... 
@overload - def integers( # type: ignore[misc] + def integers( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., - endpoint: bool = ..., - ) -> NDArray[uint8]: ... + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _UInt64Codes, + endpoint: bool = False, + ) -> NDArray[np.uint64]: ... @overload - def integers( # type: ignore[misc] + def integers( self, - low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., - endpoint: bool = ..., - ) -> NDArray[uint16]: ... + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _IntPCodes, + endpoint: bool = False, + ) -> np.intp: ... @overload - def integers( # type: ignore[misc] + def integers( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., - endpoint: bool = ..., - ) -> NDArray[uint32]: ... + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _IntPCodes, + endpoint: bool = False, + ) -> NDArray[np.intp]: ... @overload - def integers( # type: ignore[misc] + def integers( self, - low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., - endpoint: bool = ..., - ) -> NDArray[uint64]: ... + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _UIntPCodes, + endpoint: bool = False, + ) -> np.uintp: ... 
@overload - def integers( # type: ignore[misc] + def integers( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., - endpoint: bool = ..., - ) -> NDArray[int_]: ... + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _UIntPCodes, + endpoint: bool = False, + ) -> NDArray[np.uintp]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + dtype: DTypeLike = ..., + endpoint: bool = False, + ) -> Any: ... @overload - def integers( # type: ignore[misc] + def integers( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., - endpoint: bool = ..., - ) -> NDArray[uint]: ... - # TODO: Use a TypeVar _T here to get away from Any output? Should be int->NDArray[int64], ArrayLike[_T] -> _T | NDArray[Any] + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: DTypeLike = ..., + endpoint: bool = False, + ) -> NDArray[Any]: ... + + # TODO: Use a TypeVar _T here to get away from Any output? + # Should be int->NDArray[int64], ArrayLike[_T] -> _T | NDArray[Any] @overload def choice( self, @@ -547,7 +583,9 @@ class Generator: out: None | NDArray[float64] = ..., ) -> NDArray[float64]: ... @overload - def gamma(self, shape: _FloatLike_co, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc] + def gamma( + self, shape: _FloatLike_co, scale: _FloatLike_co = ..., size: None = ... + ) -> float: ... # type: ignore[misc] @overload def gamma( self, @@ -556,13 +594,23 @@ class Generator: size: None | _ShapeLike = ..., ) -> NDArray[float64]: ... @overload - def f(self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = ...) -> float: ... 
# type: ignore[misc] + def f( + self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = ... + ) -> float: ... # type: ignore[misc] @overload def f( - self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, + dfnum: _ArrayLikeFloat_co, + dfden: _ArrayLikeFloat_co, + size: None | _ShapeLike = ... ) -> NDArray[float64]: ... @overload - def noncentral_f(self, dfnum: _FloatLike_co, dfden: _FloatLike_co, nonc: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + def noncentral_f( + self, + dfnum: _FloatLike_co, + dfden: _FloatLike_co, + nonc: _FloatLike_co, size: None = ... + ) -> float: ... # type: ignore[misc] @overload def noncentral_f( self, @@ -578,10 +626,15 @@ class Generator: self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> NDArray[float64]: ... @overload - def noncentral_chisquare(self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + def noncentral_chisquare( + self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = ... + ) -> float: ... # type: ignore[misc] @overload def noncentral_chisquare( - self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, + df: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: None | _ShapeLike = ... ) -> NDArray[float64]: ... @overload def standard_t(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] @@ -594,10 +647,15 @@ class Generator: self, df: _ArrayLikeFloat_co, size: _ShapeLike = ... ) -> NDArray[float64]: ... @overload - def vonmises(self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + def vonmises( + self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = ... + ) -> float: ... # type: ignore[misc] @overload def vonmises( - self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ... 
+ self, + mu: _ArrayLikeFloat_co, + kappa: _ArrayLikeFloat_co, + size: None | _ShapeLike = ... ) -> NDArray[float64]: ... @overload def pareto(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] @@ -684,10 +742,15 @@ class Generator: self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... ) -> NDArray[float64]: ... @overload - def wald(self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + def wald( + self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = ... + ) -> float: ... # type: ignore[misc] @overload def wald( - self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, + mean: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co, + size: None | _ShapeLike = ... ) -> NDArray[float64]: ... @overload def triangular( @@ -712,10 +775,15 @@ class Generator: self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> NDArray[int64]: ... @overload - def negative_binomial(self, n: _FloatLike_co, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + def negative_binomial( + self, n: _FloatLike_co, p: _FloatLike_co, size: None = ... + ) -> int: ... # type: ignore[misc] @overload def negative_binomial( - self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, + n: _ArrayLikeFloat_co, + p: _ArrayLikeFloat_co, + size: None | _ShapeLike = ... ) -> NDArray[int64]: ... @overload def poisson(self, lam: _FloatLike_co = ..., size: None = ...) -> int: ... # type: ignore[misc] @@ -736,7 +804,9 @@ class Generator: self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> NDArray[int64]: ... @overload - def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ... # type: ignore[misc] + def hypergeometric( + self, ngood: int, nbad: int, nsample: int, size: None = ... + ) -> int: ... 
# type: ignore[misc] @overload def hypergeometric( self, diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index ed44a82f25fe..ac2f64a0f81c 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -5082,3 +5082,6 @@ def default_rng(seed=None): # Otherwise we need to instantiate a new BitGenerator and Generator as # normal. return Generator(PCG64(seed)) + + +default_rng.__module__ = "numpy.random" diff --git a/numpy/random/_pickle.pyi b/numpy/random/_pickle.pyi new file mode 100644 index 000000000000..d4c6e8155ae9 --- /dev/null +++ b/numpy/random/_pickle.pyi @@ -0,0 +1,43 @@ +from collections.abc import Callable +from typing import Final, Literal, TypeVar, TypedDict, overload, type_check_only + +from numpy.random._generator import Generator +from numpy.random._mt19937 import MT19937 +from numpy.random._pcg64 import PCG64, PCG64DXSM +from numpy.random._philox import Philox +from numpy.random._sfc64 import SFC64 +from numpy.random.bit_generator import BitGenerator +from numpy.random.mtrand import RandomState + +_T = TypeVar("_T", bound=BitGenerator) + +@type_check_only +class _BitGenerators(TypedDict): + MT19937: type[MT19937] + PCG64: type[PCG64] + PCG64DXSM: type[PCG64DXSM] + Philox: type[Philox] + SFC64: type[SFC64] + +BitGenerators: Final[_BitGenerators] = ... + +@overload +def __bit_generator_ctor(bit_generator: Literal["MT19937"] = "MT19937") -> MT19937: ... +@overload +def __bit_generator_ctor(bit_generator: Literal["PCG64"]) -> PCG64: ... +@overload +def __bit_generator_ctor(bit_generator: Literal["PCG64DXSM"]) -> PCG64DXSM: ... +@overload +def __bit_generator_ctor(bit_generator: Literal["Philox"]) -> Philox: ... +@overload +def __bit_generator_ctor(bit_generator: Literal["SFC64"]) -> SFC64: ... +@overload +def __bit_generator_ctor(bit_generator: type[_T]) -> _T: ... 
+def __generator_ctor( + bit_generator_name: str | type[BitGenerator] | BitGenerator = "MT19937", + bit_generator_ctor: Callable[[str | type[BitGenerator]], BitGenerator] = ..., +) -> Generator: ... +def __randomstate_ctor( + bit_generator_name: str | type[BitGenerator] | BitGenerator = "MT19937", + bit_generator_ctor: Callable[[str | type[BitGenerator]], BitGenerator] = ..., +) -> RandomState: ... diff --git a/numpy/random/bit_generator.pyi b/numpy/random/bit_generator.pyi index 8dfbcd9909dd..78fb769683d3 100644 --- a/numpy/random/bit_generator.pyi +++ b/numpy/random/bit_generator.pyi @@ -1,128 +1,107 @@ import abc -from threading import Lock from collections.abc import Callable, Mapping, Sequence -from typing import ( - Any, - NamedTuple, - TypeAlias, - TypedDict, - TypeVar, - overload, - Literal, - type_check_only, -) - -from numpy import dtype, uint32, uint64 -from numpy._typing import ( - NDArray, - _ArrayLikeInt_co, - _ShapeLike, - _SupportsDType, - _UInt32Codes, - _UInt64Codes, -) - -_T = TypeVar("_T") - -_DTypeLikeUint32: TypeAlias = ( - dtype[uint32] - | _SupportsDType[dtype[uint32]] - | type[uint32] - | _UInt32Codes -) -_DTypeLikeUint64: TypeAlias = ( - dtype[uint64] - | _SupportsDType[dtype[uint64]] - | type[uint64] - | _UInt64Codes -) +from threading import Lock +from typing import Any, ClassVar, Literal, NamedTuple, TypeAlias, TypedDict, overload, type_check_only + +from _typeshed import Incomplete +from typing_extensions import CapsuleType, Self + +import numpy as np +from numpy._typing import NDArray, _ArrayLikeInt_co, _DTypeLike, _ShapeLike, _UInt32Codes, _UInt64Codes + +__all__ = ["BitGenerator", "SeedSequence"] + +### + +_DTypeLikeUint_: TypeAlias = _DTypeLike[np.uint32 | np.uint64] | _UInt32Codes | _UInt64Codes @type_check_only class _SeedSeqState(TypedDict): - entropy: None | int | Sequence[int] + entropy: int | Sequence[int] | None spawn_key: tuple[int, ...] 
pool_size: int n_children_spawned: int @type_check_only class _Interface(NamedTuple): - state_address: Any - state: Any - next_uint64: Any - next_uint32: Any - next_double: Any - bit_generator: Any + state_address: Incomplete + state: Incomplete + next_uint64: Incomplete + next_uint32: Incomplete + next_double: Incomplete + bit_generator: Incomplete + +@type_check_only +class _CythonMixin: + def __setstate_cython__(self, pyx_state: object, /) -> None: ... + def __reduce_cython__(self) -> Any: ... # noqa: ANN401 + +@type_check_only +class _GenerateStateMixin(_CythonMixin): + def generate_state(self, /, n_words: int, dtype: _DTypeLikeUint_ = ...) -> NDArray[np.uint32 | np.uint64]: ... + +### class ISeedSequence(abc.ABC): @abc.abstractmethod - def generate_state( - self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ... - ) -> NDArray[uint32 | uint64]: ... + def generate_state(self, /, n_words: int, dtype: _DTypeLikeUint_ = ...) -> NDArray[np.uint32 | np.uint64]: ... -class ISpawnableSeedSequence(ISeedSequence): +class ISpawnableSeedSequence(ISeedSequence, abc.ABC): @abc.abstractmethod - def spawn(self: _T, n_children: int) -> list[_T]: ... + def spawn(self, /, n_children: int) -> list[Self]: ... + +class SeedlessSeedSequence(_GenerateStateMixin, ISpawnableSeedSequence): + def spawn(self, /, n_children: int) -> list[Self]: ... -class SeedlessSeedSequence(ISpawnableSeedSequence): - def generate_state( - self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ... - ) -> NDArray[uint32 | uint64]: ... - def spawn(self: _T, n_children: int) -> list[_T]: ... +class SeedSequence(_GenerateStateMixin, ISpawnableSeedSequence): + __pyx_vtable__: ClassVar[CapsuleType] = ... -class SeedSequence(ISpawnableSeedSequence): - entropy: None | int | Sequence[int] + entropy: int | Sequence[int] | None spawn_key: tuple[int, ...] 
pool_size: int n_children_spawned: int - pool: NDArray[uint32] + pool: NDArray[np.uint32] + def __init__( self, - entropy: None | int | Sequence[int] | _ArrayLikeInt_co = ..., + /, + entropy: _ArrayLikeInt_co | None = None, *, - spawn_key: Sequence[int] = ..., - pool_size: int = ..., + spawn_key: Sequence[int] = (), + pool_size: int = 4, n_children_spawned: int = ..., ) -> None: ... - def __repr__(self) -> str: ... + def spawn(self, /, n_children: int) -> list[Self]: ... @property - def state( - self, - ) -> _SeedSeqState: ... - def generate_state( - self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ... - ) -> NDArray[uint32 | uint64]: ... - def spawn(self, n_children: int) -> list[SeedSequence]: ... + def state(self) -> _SeedSeqState: ... -class BitGenerator(abc.ABC): +class BitGenerator(_CythonMixin, abc.ABC): lock: Lock - def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... - def __getstate__(self) -> tuple[dict[str, Any], ISeedSequence]: ... - def __setstate__( - self, state_seed_seq: dict[str, Any] | tuple[dict[str, Any], ISeedSequence] - ) -> None: ... - def __reduce__( - self, - ) -> tuple[ - Callable[[str], BitGenerator], - tuple[str], - tuple[dict[str, Any], ISeedSequence] - ]: ... - @abc.abstractmethod @property def state(self) -> Mapping[str, Any]: ... @state.setter - def state(self, value: Mapping[str, Any]) -> None: ... + def state(self, value: Mapping[str, Any], /) -> None: ... @property def seed_seq(self) -> ISeedSequence: ... - def spawn(self, n_children: int) -> list[BitGenerator]: ... - @overload - def random_raw(self, size: None = ..., output: Literal[True] = ...) -> int: ... # type: ignore[misc] - @overload - def random_raw(self, size: _ShapeLike = ..., output: Literal[True] = ...) -> NDArray[uint64]: ... # type: ignore[misc] - @overload - def random_raw(self, size: None | _ShapeLike = ..., output: Literal[False] = ...) -> None: ... 
# type: ignore[misc] - def _benchmark(self, cnt: int, method: str = ...) -> None: ... @property def ctypes(self) -> _Interface: ... @property def cffi(self) -> _Interface: ... + @property + def capsule(self) -> CapsuleType: ... + + # + def __init__(self, /, seed: _ArrayLikeInt_co | SeedSequence | None = None) -> None: ... + def __reduce__(self) -> tuple[Callable[[str], Self], tuple[str], tuple[Mapping[str, Any], ISeedSequence]]: ... + def spawn(self, /, n_children: int) -> list[Self]: ... + def _benchmark(self, /, cnt: int, method: str = "uint64") -> None: ... + + # + @overload + def random_raw(self, /, size: None = None, output: Literal[True] = True) -> int: ... + @overload + def random_raw(self, /, size: _ShapeLike, output: Literal[True] = True) -> NDArray[np.uint64]: ... + @overload + def random_raw(self, /, size: _ShapeLike | None, output: Literal[False]) -> None: ... + @overload + def random_raw(self, /, size: _ShapeLike | None = None, *, output: Literal[False]) -> None: ... diff --git a/numpy/random/meson.build b/numpy/random/meson.build index f2f2e0ac755c..be342c443b32 100644 --- a/numpy/random/meson.build +++ b/numpy/random/meson.build @@ -104,6 +104,7 @@ py.install_sources( '_mt19937.pyi', '_pcg64.pyi', '_pickle.py', + '_pickle.pyi', '_philox.pyi', '_sfc64.pyi', 'bit_generator.pxd', diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index 7db3b15fb2fb..26d0f5f4d1a4 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -4902,6 +4902,7 @@ def ranf(*args, **kwargs): return _rand.random_sample(*args, **kwargs) __all__ = [ + 'RandomState', 'beta', 'binomial', 'bytes', @@ -4954,5 +4955,18 @@ __all__ = [ 'wald', 'weibull', 'zipf', - 'RandomState', ] + +seed.__module__ = "numpy.random" +ranf.__module__ = "numpy.random" +sample.__module__ = "numpy.random" +get_bit_generator.__module__ = "numpy.random" +set_bit_generator.__module__ = "numpy.random" + +# The first item in __all__ is 'RandomState', so it can be skipped here. 
+for method_name in __all__[1:]: + method = getattr(RandomState, method_name, None) + if method is not None: + method.__module__ = "numpy.random" + +del method, method_name diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index 514f9af2ce8c..c9dc81e96a37 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -1244,7 +1244,7 @@ def test_dirichlet_small_alpha(self): @pytest.mark.slow def test_dirichlet_moderately_small_alpha(self): # Use alpha.max() < 0.1 to trigger stick breaking code path - alpha = np.array([0.02, 0.04, 0.03]) + alpha = np.array([0.02, 0.04]) exact_mean = alpha / alpha.sum() random = Generator(MT19937(self.seed)) sample = random.dirichlet(alpha, size=20000000) diff --git a/numpy/testing/__init__.pyi b/numpy/testing/__init__.pyi index e47b8f9546c6..ba3c9a2b7a44 100644 --- a/numpy/testing/__init__.pyi +++ b/numpy/testing/__init__.pyi @@ -2,97 +2,101 @@ from unittest import TestCase from . 
import overrides from ._private.utils import ( - NOGIL_BUILD, - IS_WASM, + HAS_LAPACK64, + HAS_REFCOUNT, + IS_EDITABLE, + IS_INSTALLED, + IS_MUSL, IS_PYPY, IS_PYSTON, - IS_MUSL, - IS_EDITABLE, - HAS_REFCOUNT, - HAS_LAPACK64, - assert_equal, + IS_WASM, + NOGIL_BUILD, + NUMPY_ROOT, + IgnoreException, + KnownFailureException, + SkipTest, + assert_, + assert_allclose, assert_almost_equal, assert_approx_equal, + assert_array_almost_equal, + assert_array_almost_equal_nulp, + assert_array_compare, assert_array_equal, assert_array_less, - assert_string_equal, - assert_array_almost_equal, + assert_array_max_ulp, + assert_equal, + assert_no_gc_cycles, + assert_no_warnings, assert_raises, + assert_raises_regex, + assert_string_equal, + assert_warns, + break_cycles, build_err_msg, + check_support_sve, + clear_and_catch_warnings, decorate_methods, jiffies, + measure, memusage, print_assert_equal, + run_threaded, rundocs, runstring, - verbose, - measure, - assert_, - assert_array_almost_equal_nulp, - assert_raises_regex, - assert_array_max_ulp, - assert_warns, - assert_no_warnings, - assert_allclose, - IgnoreException, - clear_and_catch_warnings, - SkipTest, - KnownFailureException, - temppath, - tempdir, suppress_warnings, - assert_array_compare, - assert_no_gc_cycles, - break_cycles, - check_support_sve, - run_threaded, + tempdir, + temppath, + verbose, ) __all__ = [ - "assert_equal", + "HAS_LAPACK64", + "HAS_REFCOUNT", + "IS_EDITABLE", + "IS_INSTALLED", + "IS_MUSL", + "IS_PYPY", + "IS_PYSTON", + "IS_WASM", + "NOGIL_BUILD", + "NUMPY_ROOT", + "IgnoreException", + "KnownFailureException", + "SkipTest", + "TestCase", + "assert_", + "assert_allclose", "assert_almost_equal", "assert_approx_equal", + "assert_array_almost_equal", + "assert_array_almost_equal_nulp", + "assert_array_compare", "assert_array_equal", "assert_array_less", - "assert_string_equal", - "assert_array_almost_equal", + "assert_array_max_ulp", + "assert_equal", + "assert_no_gc_cycles", + "assert_no_warnings", 
"assert_raises", + "assert_raises_regex", + "assert_string_equal", + "assert_warns", + "break_cycles", "build_err_msg", + "check_support_sve", + "clear_and_catch_warnings", "decorate_methods", "jiffies", + "measure", "memusage", + "overrides", "print_assert_equal", + "run_threaded", "rundocs", "runstring", - "verbose", - "measure", - "assert_", - "assert_array_almost_equal_nulp", - "assert_raises_regex", - "assert_array_max_ulp", - "assert_warns", - "assert_no_warnings", - "assert_allclose", - "IgnoreException", - "clear_and_catch_warnings", - "SkipTest", - "KnownFailureException", - "temppath", - "tempdir", - "IS_PYPY", - "HAS_REFCOUNT", - "IS_WASM", "suppress_warnings", - "assert_array_compare", - "assert_no_gc_cycles", - "break_cycles", - "HAS_LAPACK64", - "IS_PYSTON", - "IS_MUSL", - "check_support_sve", - "NOGIL_BUILD", - "IS_EDITABLE", - "run_threaded", - "TestCase", - "overrides", + "tempdir", + "temppath", + "verbose", ] diff --git a/numpy/testing/_private/__init__.pyi b/numpy/testing/_private/__init__.pyi new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/numpy/testing/_private/extbuild.pyi b/numpy/testing/_private/extbuild.pyi new file mode 100644 index 000000000000..609a45e79d16 --- /dev/null +++ b/numpy/testing/_private/extbuild.pyi @@ -0,0 +1,25 @@ +import pathlib +import types +from collections.abc import Sequence + +__all__ = ["build_and_import_extension", "compile_extension_module"] + +def build_and_import_extension( + modname: str, + functions: Sequence[tuple[str, str, str]], + *, + prologue: str = "", + build_dir: pathlib.Path | None = None, + include_dirs: Sequence[str] = [], + more_init: str = "", +) -> types.ModuleType: ... + +# +def compile_extension_module( + name: str, + builddir: pathlib.Path, + include_dirs: Sequence[str], + source_string: str, + libraries: Sequence[str] = [], + library_dirs: Sequence[str] = [], +) -> pathlib.Path: ... 
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 4ebfb54bd563..42e43e21f37b 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -4,6 +4,7 @@ """ import os import sys +import pathlib import platform import re import gc @@ -18,6 +19,8 @@ import pprint import sysconfig import concurrent.futures +import threading +import importlib.metadata import numpy as np from numpy._core import ( @@ -25,9 +28,11 @@ from numpy import isfinite, isnan, isinf import numpy.linalg._umath_linalg from numpy._utils import _rename_parameter +from numpy._core.tests._natype import pd_NA from io import StringIO + __all__ = [ 'assert_equal', 'assert_almost_equal', 'assert_approx_equal', 'assert_array_equal', 'assert_array_less', 'assert_string_equal', @@ -41,7 +46,7 @@ 'HAS_REFCOUNT', "IS_WASM", 'suppress_warnings', 'assert_array_compare', 'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON', 'IS_MUSL', 'check_support_sve', 'NOGIL_BUILD', - 'IS_EDITABLE', 'run_threaded', + 'IS_EDITABLE', 'IS_INSTALLED', 'NUMPY_ROOT', 'run_threaded', 'IS_64BIT', ] @@ -53,10 +58,40 @@ class KnownFailureException(Exception): KnownFailureTest = KnownFailureException # backwards compat verbose = 0 +NUMPY_ROOT = pathlib.Path(np.__file__).parent + +try: + np_dist = importlib.metadata.distribution('numpy') +except importlib.metadata.PackageNotFoundError: + IS_INSTALLED = IS_EDITABLE = False +else: + IS_INSTALLED = True + try: + if sys.version_info >= (3, 13): + IS_EDITABLE = np_dist.origin.dir_info.editable + else: + # Backport importlib.metadata.Distribution.origin + import json, types # noqa: E401 + origin = json.loads( + np_dist.read_text('direct_url.json') or '{}', + object_hook=lambda data: types.SimpleNamespace(**data), + ) + IS_EDITABLE = origin.dir_info.editable + except AttributeError: + IS_EDITABLE = False + + # spin installs numpy directly via meson, instead of using meson-python, and + # runs the module by setting PYTHONPATH. 
This is problematic because the + # resulting installation lacks the Python metadata (.dist-info), and numpy + # might already be installed on the environment, causing us to find its + # metadata, even though we are not actually loading that package. + # Work around this issue by checking if the numpy root matches. + if not IS_EDITABLE and np_dist.locate_file('numpy') != NUMPY_ROOT: + IS_INSTALLED = False + IS_WASM = platform.machine() in ["wasm32", "wasm64"] IS_PYPY = sys.implementation.name == 'pypy' IS_PYSTON = hasattr(sys, "pyston_version_info") -IS_EDITABLE = not bool(np.__path__) or 'editable' in np.__path__[0] HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64 @@ -70,6 +105,7 @@ class KnownFailureException(Exception): IS_MUSL = True NOGIL_BUILD = bool(sysconfig.get_config_var("Py_GIL_DISABLED")) +IS_64BIT = np.dtype(np.intp).itemsize == 8 def assert_(val, msg=''): """ @@ -100,14 +136,15 @@ def GetPerformanceAttributes(object, counter, instance=None, # thread's CPU usage is either 0 or 100). To read counters like this, # you should copy this function, but keep the counter open, and call # CollectQueryData() each time you need to know. 
- # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp (dead link) + # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp + #(dead link) # My older explanation for this was that the "AddCounter" process # forced the CPU to 100%, but the above makes more sense :) import win32pdh if format is None: format = win32pdh.PDH_FMT_LONG - path = win32pdh.MakeCounterPath( (machine, object, instance, None, - inum, counter)) + path = win32pdh.MakeCounterPath((machine, object, instance, None, + inum, counter)) hq = win32pdh.OpenQuery() try: hc = win32pdh.AddCounter(hq, path) @@ -165,7 +202,7 @@ def jiffies(_proc_pid_stat=f'/proc/{os.getpid()}/stat', _load_time=[]): l = f.readline().split(' ') return int(l[13]) except Exception: - return int(100*(time.time()-_load_time[0])) + return int(100 * (time.time() - _load_time[0])) else: # os.getpid is not in all platforms available. # Using time is safe but inaccurate, especially when process @@ -181,7 +218,7 @@ def jiffies(_load_time=[]): import time if not _load_time: _load_time.append(time.time()) - return int(100*(time.time()-_load_time[0])) + return int(100 * (time.time() - _load_time[0])) def build_err_msg(arrays, err_msg, header='Items are not equal:', @@ -189,7 +226,7 @@ def build_err_msg(arrays, err_msg, header='Items are not equal:', msg = ['\n' + header] err_msg = str(err_msg) if err_msg: - if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header): + if err_msg.find('\n') == -1 and len(err_msg) < 79 - len(header): msg = [msg[0] + ' ' + err_msg] else: msg.append(err_msg) @@ -658,14 +695,14 @@ def assert_approx_equal(actual, desired, significant=7, err_msg='', # Normalized the numbers to be in range (-10.0,10.0) # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual)))))) with np.errstate(invalid='ignore'): - scale = 0.5*(np.abs(desired) + np.abs(actual)) + scale = 0.5 * (np.abs(desired) + np.abs(actual)) scale = np.power(10, np.floor(np.log10(scale))) try: - 
sc_desired = desired/scale + sc_desired = desired / scale except ZeroDivisionError: sc_desired = 0.0 try: - sc_actual = actual/scale + sc_actual = actual / scale except ZeroDivisionError: sc_actual = 0.0 msg = build_err_msg( @@ -686,7 +723,7 @@ def assert_approx_equal(actual, desired, significant=7, err_msg='', return except (TypeError, NotImplementedError): pass - if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)): + if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant - 1)): raise AssertionError(msg) @@ -1378,10 +1415,10 @@ def check_support_sve(__cache=[]): """ gh-22982 """ - + if __cache: return __cache[0] - + import subprocess cmd = 'lscpu' try: @@ -1542,7 +1579,7 @@ def measure(code_str, times=1, label=None): i += 1 exec(code, globs, locs) elapsed = jiffies() - elapsed - return 0.01*elapsed + return 0.01 * elapsed def _assert_valid_refcount(op): @@ -1556,7 +1593,7 @@ def _assert_valid_refcount(op): import gc import numpy as np - b = np.arange(100*100).reshape(100, 100) + b = np.arange(100 * 100).reshape(100, 100) c = b i = 1 @@ -1734,7 +1771,7 @@ def assert_array_almost_equal_nulp(x, y, nulp=1): ax = np.abs(x) ay = np.abs(y) ref = nulp * np.spacing(np.where(ax > ay, ax, ay)) - if not np.all(np.abs(x-y) <= ref): + if not np.all(np.abs(x - y) <= ref): if np.iscomplexobj(x) or np.iscomplexobj(y): msg = f"Arrays are not equal to {nulp} ULP" else: @@ -1850,7 +1887,7 @@ def nulp_diff(x, y, dtype=None): (x.shape, y.shape)) def _diff(rx, ry, vdt): - diff = np.asarray(rx-ry, dtype=vdt) + diff = np.asarray(rx - ry, dtype=vdt) return np.abs(diff) rx = integer_repr(x) @@ -2595,7 +2632,7 @@ def check_free_memory(free_bytes): except ValueError as exc: raise ValueError(f'Invalid environment variable {env_var}: {exc}') - msg = (f'{free_bytes/1e9} GB memory required, but environment variable ' + msg = (f'{free_bytes / 1e9} GB memory required, but environment variable ' f'NPY_AVAILABLE_MEM={env_value} set') else: mem_free = _get_mem_available() @@ 
-2606,7 +2643,9 @@ def check_free_memory(free_bytes): "the test.") mem_free = -1 else: - msg = f'{free_bytes/1e9} GB memory required, but {mem_free/1e9} GB available' + free_bytes_gb = free_bytes / 1e9 + mem_free_gb = mem_free / 1e9 + msg = f'{free_bytes_gb} GB memory required, but {mem_free_gb} GB available' return msg if mem_free < free_bytes else None @@ -2684,12 +2723,38 @@ def _get_glibc_version(): _glibc_older_than = lambda x: (_glibcver != '0.0' and _glibcver < x) -def run_threaded(func, iters, pass_count=False): +def run_threaded(func, max_workers=8, pass_count=False, + pass_barrier=False, outer_iterations=1, + prepare_args=None): """Runs a function many times in parallel""" - with concurrent.futures.ThreadPoolExecutor(max_workers=8) as tpe: - if pass_count: - futures = [tpe.submit(func, i) for i in range(iters)] - else: - futures = [tpe.submit(func) for _ in range(iters)] - for f in futures: - f.result() + for _ in range(outer_iterations): + with (concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) + as tpe): + if prepare_args is None: + args = [] + else: + args = prepare_args() + if pass_barrier: + barrier = threading.Barrier(max_workers) + args.append(barrier) + if pass_count: + all_args = [(func, i, *args) for i in range(max_workers)] + else: + all_args = [(func, *args) for i in range(max_workers)] + try: + futures = [] + for arg in all_args: + futures.append(tpe.submit(*arg)) + finally: + if len(futures) < max_workers and pass_barrier: + barrier.abort() + for f in futures: + f.result() + + +def get_stringdtype_dtype(na_object, coerce=True): + # explicit is check for pd_NA because != with pd_NA returns pd_NA + if na_object is pd_NA or na_object != "unset": + return np.dtypes.StringDType(na_object=na_object, coerce=coerce) + else: + return np.dtypes.StringDType(coerce=coerce) diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index b2f4045c7703..75ea45d3a721 100644 --- a/numpy/testing/_private/utils.pyi +++ 
b/numpy/testing/_private/utils.pyi @@ -1,42 +1,42 @@ -import sys import ast +import sys import types -import warnings import unittest -from _typeshed import GenericPath, StrOrBytesPath, StrPath +import warnings from collections.abc import Callable, Iterable, Sequence from contextlib import _GeneratorContextManager +from pathlib import Path from re import Pattern from typing import ( - Literal as L, Any, AnyStr, ClassVar, + Final, + Generic, NoReturn, + SupportsIndex, TypeAlias, overload, type_check_only, - TypeVar, - Final, - SupportsIndex, - ParamSpec ) +from typing import Literal as L +from unittest.case import SkipTest + +from _typeshed import ConvertibleToFloat, GenericPath, StrOrBytesPath, StrPath +from typing_extensions import ParamSpec, Self, TypeVar, TypeVarTuple, Unpack import numpy as np -from numpy import number, object_, _ConvertibleToFloat from numpy._typing import ( - NDArray, ArrayLike, DTypeLike, + NDArray, + _ArrayLikeDT64_co, _ArrayLikeNumber_co, _ArrayLikeObject_co, _ArrayLikeTD64_co, - _ArrayLikeDT64_co, ) -from unittest.case import SkipTest - -__all__ = [ +__all__ = [ # noqa: RUF022 "IS_EDITABLE", "IS_MUSL", "IS_PYPY", @@ -83,58 +83,33 @@ __all__ = [ "run_threaded", ] -_P = ParamSpec("_P") +### + _T = TypeVar("_T") -_ET = TypeVar("_ET", bound=BaseException) +_Ts = TypeVarTuple("_Ts") +_Tss = ParamSpec("_Tss") +_ET = TypeVar("_ET", bound=BaseException, default=BaseException) _FT = TypeVar("_FT", bound=Callable[..., Any]) +_W_co = TypeVar("_W_co", bound=_WarnLog | None, default=_WarnLog | None, covariant=True) +_T_or_bool = TypeVar("_T_or_bool", default=bool) + +_StrLike: TypeAlias = str | bytes +_RegexLike: TypeAlias = _StrLike | Pattern[Any] +_NumericArrayLike: TypeAlias = _ArrayLikeNumber_co | _ArrayLikeObject_co -# Must return a bool or an ndarray/generic type -# that is supported by `np.logical_and.reduce` +_ExceptionSpec: TypeAlias = type[_ET] | tuple[type[_ET], ...] 
+_WarningSpec: TypeAlias = type[Warning] +_WarnLog: TypeAlias = list[warnings.WarningMessage] +_ToModules: TypeAlias = Iterable[types.ModuleType] + +# Must return a bool or an ndarray/generic type that is supported by `np.logical_and.reduce` _ComparisonFunc: TypeAlias = Callable[ [NDArray[Any], NDArray[Any]], - ( - bool - | np.bool - | number[Any] - | NDArray[np.bool | number[Any] | object_] - ) + bool | np.bool | np.number | NDArray[np.bool | np.number | np.object_], ] -class KnownFailureException(Exception): ... -class IgnoreException(Exception): ... - -class clear_and_catch_warnings(warnings.catch_warnings[list[warnings.WarningMessage]]): - class_modules: ClassVar[tuple[types.ModuleType, ...]] - modules: set[types.ModuleType] - @overload - def __new__( - cls, - record: L[False] = ..., - modules: Iterable[types.ModuleType] = ..., - ) -> _clear_and_catch_warnings_without_records: ... - @overload - def __new__( - cls, - record: L[True], - modules: Iterable[types.ModuleType] = ..., - ) -> _clear_and_catch_warnings_with_records: ... - @overload - def __new__( - cls, - record: bool, - modules: Iterable[types.ModuleType] = ..., - ) -> clear_and_catch_warnings: ... - def __enter__(self) -> None | list[warnings.WarningMessage]: ... - def __exit__( - self, - __exc_type: None | type[BaseException] = ..., - __exc_val: None | BaseException = ..., - __exc_tb: None | types.TracebackType = ..., - ) -> None: ... - # Type-check only `clear_and_catch_warnings` subclasses for both values of the # `record` parameter. Copied from the stdlib `warnings` stubs. - @type_check_only class _clear_and_catch_warnings_with_records(clear_and_catch_warnings): def __enter__(self) -> list[warnings.WarningMessage]: ... @@ -143,321 +118,379 @@ class _clear_and_catch_warnings_with_records(clear_and_catch_warnings): class _clear_and_catch_warnings_without_records(clear_and_catch_warnings): def __enter__(self) -> None: ... +### + +verbose: int = 0 +NUMPY_ROOT: Final[Path] = ... 
+IS_INSTALLED: Final[bool] = ... +IS_EDITABLE: Final[bool] = ... +IS_MUSL: Final[bool] = ... +IS_PYPY: Final[bool] = ... +IS_PYSTON: Final[bool] = ... +IS_WASM: Final[bool] = ... +HAS_REFCOUNT: Final[bool] = ... +HAS_LAPACK64: Final[bool] = ... +NOGIL_BUILD: Final[bool] = ... + +class KnownFailureException(Exception): ... +class IgnoreException(Exception): ... + +# NOTE: `warnings.catch_warnings` is incorrectly defined as invariant in typeshed +class clear_and_catch_warnings(warnings.catch_warnings[_W_co], Generic[_W_co]): # type: ignore[type-var] # pyright: ignore[reportInvalidTypeArguments] + class_modules: ClassVar[tuple[types.ModuleType, ...]] = () + modules: Final[set[types.ModuleType]] + @overload # record: True + def __init__(self: clear_and_catch_warnings[_WarnLog], /, record: L[True], modules: _ToModules = ()) -> None: ... + @overload # record: False (default) + def __init__(self: clear_and_catch_warnings[None], /, record: L[False] = False, modules: _ToModules = ()) -> None: ... + @overload # record; bool + def __init__(self, /, record: bool, modules: _ToModules = ()) -> None: ... + class suppress_warnings: - log: list[warnings.WarningMessage] - def __init__( - self, - forwarding_rule: L["always", "module", "once", "location"] = ..., - ) -> None: ... - def filter( - self, - category: type[Warning] = ..., - message: str = ..., - module: None | types.ModuleType = ..., - ) -> None: ... - def record( - self, - category: type[Warning] = ..., - message: str = ..., - module: None | types.ModuleType = ..., - ) -> list[warnings.WarningMessage]: ... - def __enter__(self: _T) -> _T: ... - def __exit__( - self, - __exc_type: None | type[BaseException] = ..., - __exc_val: None | BaseException = ..., - __exc_tb: None | types.TracebackType = ..., - ) -> None: ... - def __call__(self, func: _FT) -> _FT: ... 
- -verbose: int -IS_EDITABLE: Final[bool] -IS_MUSL: Final[bool] -IS_PYPY: Final[bool] -IS_PYSTON: Final[bool] -IS_WASM: Final[bool] -HAS_REFCOUNT: Final[bool] -HAS_LAPACK64: Final[bool] -NOGIL_BUILD: Final[bool] - -def assert_(val: object, msg: str | Callable[[], str] = ...) -> None: ... + log: Final[_WarnLog] + def __init__(self, /, forwarding_rule: L["always", "module", "once", "location"] = "always") -> None: ... + def __enter__(self) -> Self: ... + def __exit__(self, cls: type[BaseException] | None, exc: BaseException | None, tb: types.TracebackType | None, /) -> None: ... + def __call__(self, /, func: _FT) -> _FT: ... + + # + def filter(self, /, category: type[Warning] = ..., message: str = "", module: types.ModuleType | None = None) -> None: ... + def record(self, /, category: type[Warning] = ..., message: str = "", module: types.ModuleType | None = None) -> _WarnLog: ... # Contrary to runtime we can't do `os.name` checks while type checking, # only `sys.platform` checks if sys.platform == "win32" or sys.platform == "cygwin": def memusage(processName: str = ..., instance: int = ...) -> int: ... elif sys.platform == "linux": - def memusage(_proc_pid_stat: StrOrBytesPath = ...) -> None | int: ... + def memusage(_proc_pid_stat: StrOrBytesPath = ...) -> int | None: ... else: def memusage() -> NoReturn: ... if sys.platform == "linux": - def jiffies( - _proc_pid_stat: StrOrBytesPath = ..., - _load_time: list[float] = ..., - ) -> int: ... + def jiffies(_proc_pid_stat: StrOrBytesPath = ..., _load_time: list[float] = []) -> int: ... else: - def jiffies(_load_time: list[float] = ...) -> int: ... + def jiffies(_load_time: list[float] = []) -> int: ... +# def build_err_msg( arrays: Iterable[object], - err_msg: str, + err_msg: object, header: str = ..., verbose: bool = ..., names: Sequence[str] = ..., - precision: None | SupportsIndex = ..., + precision: SupportsIndex | None = ..., ) -> str: ... 
+# +def print_assert_equal(test_string: str, actual: object, desired: object) -> None: ... + +# +def assert_(val: object, msg: str | Callable[[], str] = "") -> None: ... + +# def assert_equal( actual: object, desired: object, - err_msg: object = ..., - verbose: bool = ..., + err_msg: object = "", + verbose: bool = True, *, - strict: bool = ... -) -> None: ... - -def print_assert_equal( - test_string: str, - actual: object, - desired: object, + strict: bool = False, ) -> None: ... def assert_almost_equal( - actual: _ArrayLikeNumber_co | _ArrayLikeObject_co, - desired: _ArrayLikeNumber_co | _ArrayLikeObject_co, - decimal: int = ..., - err_msg: object = ..., - verbose: bool = ..., + actual: _NumericArrayLike, + desired: _NumericArrayLike, + decimal: int = 7, + err_msg: object = "", + verbose: bool = True, ) -> None: ... -# Anything that can be coerced into `builtins.float` +# def assert_approx_equal( - actual: _ConvertibleToFloat, - desired: _ConvertibleToFloat, - significant: int = ..., - err_msg: object = ..., - verbose: bool = ..., + actual: ConvertibleToFloat, + desired: ConvertibleToFloat, + significant: int = 7, + err_msg: object = "", + verbose: bool = True, ) -> None: ... +# def assert_array_compare( comparison: _ComparisonFunc, x: ArrayLike, y: ArrayLike, - err_msg: object = ..., - verbose: bool = ..., - header: str = ..., - precision: SupportsIndex = ..., - equal_nan: bool = ..., - equal_inf: bool = ..., + err_msg: object = "", + verbose: bool = True, + header: str = "", + precision: SupportsIndex = 6, + equal_nan: bool = True, + equal_inf: bool = True, *, - strict: bool = ... + strict: bool = False, + names: tuple[str, str] = ("ACTUAL", "DESIRED"), ) -> None: ... +# def assert_array_equal( - x: ArrayLike, - y: ArrayLike, - /, - err_msg: object = ..., - verbose: bool = ..., + actual: object, + desired: object, + err_msg: object = "", + verbose: bool = True, *, - strict: bool = ... + strict: bool = False, ) -> None: ... 
+# def assert_array_almost_equal( - x: _ArrayLikeNumber_co | _ArrayLikeObject_co, - y: _ArrayLikeNumber_co | _ArrayLikeObject_co, - /, - decimal: float = ..., - err_msg: object = ..., - verbose: bool = ..., + actual: _NumericArrayLike, + desired: _NumericArrayLike, + decimal: float = 6, + err_msg: object = "", + verbose: bool = True, ) -> None: ... @overload def assert_array_less( - x: _ArrayLikeNumber_co | _ArrayLikeObject_co, - y: _ArrayLikeNumber_co | _ArrayLikeObject_co, - err_msg: object = ..., - verbose: bool = ..., + x: _ArrayLikeDT64_co, + y: _ArrayLikeDT64_co, + err_msg: object = "", + verbose: bool = True, *, - strict: bool = ... + strict: bool = False, ) -> None: ... @overload def assert_array_less( x: _ArrayLikeTD64_co, y: _ArrayLikeTD64_co, - err_msg: object = ..., - verbose: bool = ..., + err_msg: object = "", + verbose: bool = True, *, - strict: bool = ... + strict: bool = False, ) -> None: ... @overload def assert_array_less( - x: _ArrayLikeDT64_co, - y: _ArrayLikeDT64_co, - err_msg: object = ..., - verbose: bool = ..., + x: _NumericArrayLike, + y: _NumericArrayLike, + err_msg: object = "", + verbose: bool = True, *, - strict: bool = ... + strict: bool = False, ) -> None: ... -def runstring( - astr: str | bytes | types.CodeType, - dict: None | dict[str, Any], -) -> Any: ... - +# def assert_string_equal(actual: str, desired: str) -> None: ... -def rundocs( - filename: StrPath | None = ..., - raise_on_error: bool = ..., -) -> None: ... - -def check_support_sve(__cache: list[_T]) -> _T: ... - -def raises(*args: type[BaseException]) -> Callable[[_FT], _FT]: ... - -@overload -def assert_raises( # type: ignore - expected_exception: type[BaseException] | tuple[type[BaseException], ...], - callable: Callable[_P, Any], - /, - *args: _P.args, - **kwargs: _P.kwargs, -) -> None: ... 
+# @overload def assert_raises( - expected_exception: type[_ET] | tuple[type[_ET], ...], + exception_class: _ExceptionSpec[_ET], + /, *, - msg: None | str = ..., + msg: str | None = None, ) -> unittest.case._AssertRaisesContext[_ET]: ... - @overload -def assert_raises_regex( - expected_exception: type[BaseException] | tuple[type[BaseException], ...], - expected_regex: str | bytes | Pattern[Any], - callable: Callable[_P, Any], +def assert_raises( + exception_class: _ExceptionSpec, + callable: Callable[_Tss, Any], /, - *args: _P.args, - **kwargs: _P.kwargs, + *args: _Tss.args, + **kwargs: _Tss.kwargs, ) -> None: ... + +# @overload def assert_raises_regex( - expected_exception: type[_ET] | tuple[type[_ET], ...], - expected_regex: str | bytes | Pattern[Any], + exception_class: _ExceptionSpec[_ET], + expected_regexp: _RegexLike, *, - msg: None | str = ..., + msg: str | None = None, ) -> unittest.case._AssertRaisesContext[_ET]: ... - -def decorate_methods( - cls: type[Any], - decorator: Callable[[Callable[..., Any]], Any], - testmatch: None | str | bytes | Pattern[Any] = ..., +@overload +def assert_raises_regex( + exception_class: _ExceptionSpec, + expected_regexp: _RegexLike, + callable: Callable[_Tss, Any], + *args: _Tss.args, + **kwargs: _Tss.kwargs, ) -> None: ... -def measure( - code_str: str | bytes | ast.mod | ast.AST, - times: int = ..., - label: None | str = ..., -) -> float: ... - +# @overload def assert_allclose( - actual: _ArrayLikeNumber_co | _ArrayLikeObject_co, - desired: _ArrayLikeNumber_co | _ArrayLikeObject_co, - rtol: float = ..., - atol: float = ..., - equal_nan: bool = ..., - err_msg: object = ..., - verbose: bool = ..., + actual: _ArrayLikeTD64_co, + desired: _ArrayLikeTD64_co, + rtol: float = 1e-7, + atol: float = 0, + equal_nan: bool = True, + err_msg: object = "", + verbose: bool = True, *, - strict: bool = ... + strict: bool = False, ) -> None: ... 
@overload def assert_allclose( - actual: _ArrayLikeTD64_co, - desired: _ArrayLikeTD64_co, - rtol: float = ..., - atol: float = ..., - equal_nan: bool = ..., - err_msg: object = ..., - verbose: bool = ..., + actual: _NumericArrayLike, + desired: _NumericArrayLike, + rtol: float = 1e-7, + atol: float = 0, + equal_nan: bool = True, + err_msg: object = "", + verbose: bool = True, *, - strict: bool = ... + strict: bool = False, ) -> None: ... +# def assert_array_almost_equal_nulp( x: _ArrayLikeNumber_co, y: _ArrayLikeNumber_co, - nulp: float = ..., + nulp: float = 1, ) -> None: ... +# def assert_array_max_ulp( a: _ArrayLikeNumber_co, b: _ArrayLikeNumber_co, - maxulp: float = ..., - dtype: DTypeLike = ..., + maxulp: float = 1, + dtype: DTypeLike | None = None, ) -> NDArray[Any]: ... +# @overload -def assert_warns(warning_class: type[Warning]) -> _GeneratorContextManager[None]: ... +def assert_warns(warning_class: _WarningSpec) -> _GeneratorContextManager[None]: ... @overload -def assert_warns( - warning_class: type[Warning], - func: Callable[_P, _T], - /, - *args: _P.args, - **kwargs: _P.kwargs, -) -> _T: ... +def assert_warns(warning_class: _WarningSpec, func: Callable[_Tss, _T], *args: _Tss.args, **kwargs: _Tss.kwargs) -> _T: ... +# @overload def assert_no_warnings() -> _GeneratorContextManager[None]: ... @overload -def assert_no_warnings( - func: Callable[_P, _T], - /, - *args: _P.args, - **kwargs: _P.kwargs, -) -> _T: ... +def assert_no_warnings(func: Callable[_Tss, _T], /, *args: _Tss.args, **kwargs: _Tss.kwargs) -> _T: ... +# +@overload +def assert_no_gc_cycles() -> _GeneratorContextManager[None]: ... +@overload +def assert_no_gc_cycles(func: Callable[_Tss, Any], /, *args: _Tss.args, **kwargs: _Tss.kwargs) -> None: ... + +### + +# @overload def tempdir( - suffix: None = ..., - prefix: None = ..., - dir: None = ..., + suffix: None = None, + prefix: None = None, + dir: None = None, ) -> _GeneratorContextManager[str]: ... 
@overload def tempdir( - suffix: AnyStr | None = ..., - prefix: AnyStr | None = ..., - dir: GenericPath[AnyStr] | None = ..., + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + *, + dir: GenericPath[AnyStr], +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def tempdir( + suffix: AnyStr | None = None, + *, + prefix: AnyStr, + dir: GenericPath[AnyStr] | None = None, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def tempdir( + suffix: AnyStr, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, ) -> _GeneratorContextManager[AnyStr]: ... +# @overload def temppath( - suffix: None = ..., - prefix: None = ..., - dir: None = ..., - text: bool = ..., + suffix: None = None, + prefix: None = None, + dir: None = None, + text: bool = False, ) -> _GeneratorContextManager[str]: ... @overload def temppath( - suffix: AnyStr | None = ..., - prefix: AnyStr | None = ..., - dir: GenericPath[AnyStr] | None = ..., - text: bool = ..., + suffix: AnyStr | None, + prefix: AnyStr | None, + dir: GenericPath[AnyStr], + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def temppath( + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + *, + dir: GenericPath[AnyStr], + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def temppath( + suffix: AnyStr | None, + prefix: AnyStr, + dir: GenericPath[AnyStr] | None = None, + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def temppath( + suffix: AnyStr | None = None, + *, + prefix: AnyStr, + dir: GenericPath[AnyStr] | None = None, + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def temppath( + suffix: AnyStr, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, + text: bool = False, ) -> _GeneratorContextManager[AnyStr]: ... +# +def check_support_sve(__cache: list[_T_or_bool] = []) -> _T_or_bool: ... 
# noqa: PYI063 + +# +def decorate_methods( + cls: type, + decorator: Callable[[Callable[..., Any]], Any], + testmatch: _RegexLike | None = None, +) -> None: ... + +# @overload -def assert_no_gc_cycles() -> _GeneratorContextManager[None]: ... +def run_threaded( + func: Callable[[], None], + max_workers: int = 8, + pass_count: bool = False, + pass_barrier: bool = False, + outer_iterations: int = 1, + prepare_args: None = None, +) -> None: ... @overload -def assert_no_gc_cycles( - func: Callable[_P, Any], - /, - *args: _P.args, - **kwargs: _P.kwargs, +def run_threaded( + func: Callable[[Unpack[_Ts]], None], + max_workers: int, + pass_count: bool, + pass_barrier: bool, + outer_iterations: int, + prepare_args: tuple[Unpack[_Ts]], +) -> None: ... +@overload +def run_threaded( + func: Callable[[Unpack[_Ts]], None], + max_workers: int = 8, + pass_count: bool = False, + pass_barrier: bool = False, + outer_iterations: int = 1, + *, + prepare_args: tuple[Unpack[_Ts]], ) -> None: ... +# +def runstring(astr: _StrLike | types.CodeType, dict: dict[str, Any] | None) -> Any: ... # noqa: ANN401 +def rundocs(filename: StrPath | None = None, raise_on_error: bool = True) -> None: ... +def measure(code_str: _StrLike | ast.AST, times: int = 1, label: str | None = None) -> float: ... def break_cycles() -> None: ... - -def run_threaded(func: Callable[[], None], iters: int, pass_count: bool = False) -> None: ... diff --git a/numpy/testing/overrides.pyi b/numpy/testing/overrides.pyi new file mode 100644 index 000000000000..3fefc3f350da --- /dev/null +++ b/numpy/testing/overrides.pyi @@ -0,0 +1,11 @@ +from collections.abc import Callable, Hashable +from typing import Any + +from typing_extensions import TypeIs + +import numpy as np + +def get_overridable_numpy_ufuncs() -> set[np.ufunc]: ... +def get_overridable_numpy_array_functions() -> set[Callable[..., Any]]: ... +def allows_array_ufunc_override(func: object) -> TypeIs[np.ufunc]: ... 
+def allows_array_function_override(func: Hashable) -> bool: ... diff --git a/numpy/testing/print_coercion_tables.pyi b/numpy/testing/print_coercion_tables.pyi new file mode 100644 index 000000000000..e6430304675e --- /dev/null +++ b/numpy/testing/print_coercion_tables.pyi @@ -0,0 +1,27 @@ +from collections.abc import Iterable +from typing import ClassVar, Generic + +from typing_extensions import Self, TypeVar + +import numpy as np + +_VT_co = TypeVar("_VT_co", default=object, covariant=True) + +# undocumented +class GenericObject(Generic[_VT_co]): + dtype: ClassVar[np.dtype[np.object_]] = ... + v: _VT_co + + def __init__(self, /, v: _VT_co) -> None: ... + def __add__(self, other: object, /) -> Self: ... + def __radd__(self, other: object, /) -> Self: ... + +def print_cancast_table(ntypes: Iterable[str]) -> None: ... +def print_coercion_table( + ntypes: Iterable[str], + inputfirstvalue: int, + inputsecondvalue: int, + firstarray: bool, + use_promote_types: bool = False, +) -> None: ... +def print_new_cast_table(*, can_cast: bool = True, legacy: bool = False, flags: bool = False) -> None: ... 
diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index e5e6e4630633..b25818c62d31 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -1,3 +1,4 @@ +import functools import sys import sysconfig import subprocess @@ -681,7 +682,7 @@ def test_functions_single_location(): assert len(duplicated_functions) == 0, duplicated_functions -def test___module__attribute(): +def test___module___attribute(): modules_queue = [np] visited_modules = {np} visited_functions = set() @@ -701,7 +702,7 @@ def test___module__attribute(): member_name not in [ "char", "core", "ctypeslib", "f2py", "ma", "lapack_lite", "mrecords", "testing", "tests", "polynomial", "typing", - "random", # cython disallows overriding __module__ + "mtrand", "bit_generator", ] and member not in visited_modules # not visited yet ): @@ -728,6 +729,13 @@ def test___module__attribute(): ): continue + # skip cdef classes + if member.__name__ in ( + "BitGenerator", "Generator", "MT19937", "PCG64", "PCG64DXSM", + "Philox", "RandomState", "SFC64", "SeedSequence", + ): + continue + incorrect_entries.append( dict( Func=member.__name__, @@ -739,3 +747,64 @@ def test___module__attribute(): if incorrect_entries: assert len(incorrect_entries) == 0, incorrect_entries + + +def _check___qualname__(obj) -> bool: + qualname = obj.__qualname__ + name = obj.__name__ + module_name = obj.__module__ + assert name == qualname.split(".")[-1] + + module = sys.modules[module_name] + actual_obj = functools.reduce(getattr, qualname.split("."), module) + return ( + actual_obj is obj or + ( + # for bound methods check qualname match + module_name.startswith("numpy.random") and + actual_obj.__qualname__ == qualname + ) + ) + + +def test___qualname___attribute(): + modules_queue = [np] + visited_modules = {np} + visited_functions = set() + incorrect_entries = [] + + while len(modules_queue) > 0: + module = modules_queue.pop() + for member_name in dir(module): + member = getattr(module, 
member_name) + # first check if we got a module + if ( + inspect.ismodule(member) and # it's a module + "numpy" in member.__name__ and # inside NumPy + not member_name.startswith("_") and # not private + member_name not in [ + "f2py", "ma", "tests", "testing", "typing", + "bit_generator", "ctypeslib", "lapack_lite", + ] and # skip modules + "numpy._core" not in member.__name__ and # outside _core + member not in visited_modules # not visited yet + ): + modules_queue.append(member) + visited_modules.add(member) + elif ( + not inspect.ismodule(member) and + hasattr(member, "__name__") and + not member.__name__.startswith("_") and + not member_name.startswith("_") and + not _check___qualname__(member) and + member not in visited_functions + ): + incorrect_entries.append( + dict( + actual=member.__qualname__, expected=member.__name__, + ) + ) + visited_functions.add(member) + + if incorrect_entries: + assert len(incorrect_entries) == 0, incorrect_entries diff --git a/numpy/typing/tests/data/fail/arithmetic.pyi b/numpy/typing/tests/data/fail/arithmetic.pyi index 3d250c493cfb..29f3ab4e28d3 100644 --- a/numpy/typing/tests/data/fail/arithmetic.pyi +++ b/numpy/typing/tests/data/fail/arithmetic.pyi @@ -72,6 +72,11 @@ AR_i // AR_LIKE_m # E: Unsupported operand types AR_f // AR_LIKE_m # E: Unsupported operand types AR_c // AR_LIKE_m # E: Unsupported operand types +# regression tests for https://github.com/numpy/numpy/issues/28957 +AR_c // 2 # type: ignore[operator] +AR_c // AR_i # type: ignore[operator] +AR_c // AR_c # type: ignore[operator] + # Array multiplication AR_b *= AR_LIKE_u # E: incompatible type diff --git a/numpy/typing/tests/data/fail/array_like.pyi b/numpy/typing/tests/data/fail/array_like.pyi index 133b5fd49700..a21101a993c7 100644 --- a/numpy/typing/tests/data/fail/array_like.pyi +++ b/numpy/typing/tests/data/fail/array_like.pyi @@ -14,3 +14,5 @@ scalar = np.int64(1) scalar.__array__(dtype=np.float64) # E: No overload variant array = np.array([1]) 
array.__array__(dtype=np.float64) # E: No overload variant + +array.setfield(np.eye(1), np.int32, (0, 1)) # E: No overload variant diff --git a/numpy/typing/tests/data/fail/arrayprint.pyi b/numpy/typing/tests/data/fail/arrayprint.pyi index f8c8a3237816..486c11e79868 100644 --- a/numpy/typing/tests/data/fail/arrayprint.pyi +++ b/numpy/typing/tests/data/fail/arrayprint.pyi @@ -6,11 +6,11 @@ import numpy.typing as npt AR: npt.NDArray[np.float64] func1: Callable[[Any], str] -func2: Callable[[np.integer[Any]], str] +func2: Callable[[np.integer], str] -np.array2string(AR, style=None) # E: Unexpected keyword argument -np.array2string(AR, legacy="1.14") # E: incompatible type -np.array2string(AR, sign="*") # E: incompatible type -np.array2string(AR, floatmode="default") # E: incompatible type -np.array2string(AR, formatter={"A": func1}) # E: incompatible type -np.array2string(AR, formatter={"float": func2}) # E: Incompatible types +np.array2string(AR, style=None) # E: No overload variant +np.array2string(AR, legacy="1.14") # E: No overload variant +np.array2string(AR, sign="*") # E: No overload variant +np.array2string(AR, floatmode="default") # E: No overload variant +np.array2string(AR, formatter={"A": func1}) # E: No overload variant +np.array2string(AR, formatter={"float": func2}) # E: No overload variant diff --git a/numpy/typing/tests/data/fail/modules.pyi b/numpy/typing/tests/data/fail/modules.pyi index c86627e0c8ea..541be15b24ae 100644 --- a/numpy/typing/tests/data/fail/modules.pyi +++ b/numpy/typing/tests/data/fail/modules.pyi @@ -13,6 +13,5 @@ np.math # E: Module has no attribute # e.g. 
one must first execute `import numpy.lib.recfunctions` np.lib.recfunctions # E: Module has no attribute -np.__NUMPY_SETUP__ # E: Module has no attribute np.__deprecated_attrs__ # E: Module has no attribute np.__expired_functions__ # E: Module has no attribute diff --git a/numpy/typing/tests/data/fail/ndarray_misc.pyi b/numpy/typing/tests/data/fail/ndarray_misc.pyi index 674b378829a0..489aefca7ffc 100644 --- a/numpy/typing/tests/data/fail/ndarray_misc.pyi +++ b/numpy/typing/tests/data/fail/ndarray_misc.pyi @@ -16,11 +16,6 @@ AR_b: npt.NDArray[np.bool] ctypes_obj = AR_f8.ctypes -reveal_type(ctypes_obj.get_data()) # E: has no attribute -reveal_type(ctypes_obj.get_shape()) # E: has no attribute -reveal_type(ctypes_obj.get_strides()) # E: has no attribute -reveal_type(ctypes_obj.get_as_parameter()) # E: has no attribute - f8.argpartition(0) # E: has no attribute f8.diagonal() # E: has no attribute f8.dot(1) # E: has no attribute @@ -31,8 +26,6 @@ f8.setfield(2, np.float64) # E: has no attribute f8.sort() # E: has no attribute f8.trace() # E: has no attribute -AR_M.__int__() # E: Invalid self argument -AR_M.__float__() # E: Invalid self argument AR_M.__complex__() # E: Invalid self argument AR_b.__index__() # E: Invalid self argument diff --git a/numpy/typing/tests/data/fail/scalars.pyi b/numpy/typing/tests/data/fail/scalars.pyi index 5c6ccb177fbb..e847d8d6c45a 100644 --- a/numpy/typing/tests/data/fail/scalars.pyi +++ b/numpy/typing/tests/data/fail/scalars.pyi @@ -28,7 +28,6 @@ np.float32(3j) # E: incompatible type np.float32([1.0, 0.0, 0.0]) # E: incompatible type np.complex64([]) # E: incompatible type -np.complex64(1, 2) # E: Too many arguments # TODO: protocols (can't check for non-existent protocols w/ __getattr__) np.datetime64(0) # E: No overload variant @@ -60,7 +59,7 @@ np.flexible(b"test") # E: Cannot instantiate abstract class np.float64(value=0.0) # E: Unexpected keyword argument np.int64(value=0) # E: Unexpected keyword argument np.uint64(value=0) # E: 
Unexpected keyword argument -np.complex128(value=0.0j) # E: Unexpected keyword argument +np.complex128(value=0.0j) # E: No overload variant np.str_(value='bob') # E: No overload variant np.bytes_(value=b'test') # E: No overload variant np.void(value=b'test') # E: No overload variant diff --git a/numpy/typing/tests/data/fail/strings.pyi b/numpy/typing/tests/data/fail/strings.pyi index e284501c9d67..25c3c2ecc0d7 100644 --- a/numpy/typing/tests/data/fail/strings.pyi +++ b/numpy/typing/tests/data/fail/strings.pyi @@ -22,11 +22,6 @@ np.strings.decode(AR_U) # E: incompatible type np.strings.join(AR_U, b"_") # E: incompatible type np.strings.join(AR_S, "_") # E: incompatible type -np.strings.ljust(AR_U, 5, fillchar=b"a") # E: incompatible type -np.strings.ljust(AR_S, 5, fillchar="a") # E: incompatible type -np.strings.rjust(AR_U, 5, fillchar=b"a") # E: incompatible type -np.strings.rjust(AR_S, 5, fillchar="a") # E: incompatible type - np.strings.lstrip(AR_U, b"a") # E: incompatible type np.strings.lstrip(AR_S, "a") # E: incompatible type np.strings.strip(AR_U, b"a") # E: incompatible type diff --git a/numpy/typing/tests/data/fail/testing.pyi b/numpy/typing/tests/data/fail/testing.pyi index 953670180203..f7eaa7d20836 100644 --- a/numpy/typing/tests/data/fail/testing.pyi +++ b/numpy/typing/tests/data/fail/testing.pyi @@ -3,7 +3,7 @@ import numpy.typing as npt AR_U: npt.NDArray[np.str_] -def func() -> bool: ... +def func(x: object) -> bool: ... 
np.testing.assert_(True, msg=1) # E: incompatible type np.testing.build_err_msg(1, "test") # E: incompatible type @@ -20,9 +20,9 @@ np.testing.assert_allclose(AR_U, AR_U) # E: incompatible type np.testing.assert_array_almost_equal_nulp(AR_U, AR_U) # E: incompatible type np.testing.assert_array_max_ulp(AR_U, AR_U) # E: incompatible type -np.testing.assert_warns(warning_class=RuntimeWarning, func=func) # E: No overload variant +np.testing.assert_warns(RuntimeWarning, func) # E: No overload variant np.testing.assert_no_warnings(func=func) # E: No overload variant -np.testing.assert_no_warnings(func, None) # E: Too many arguments -np.testing.assert_no_warnings(func, test=None) # E: No overload variant +np.testing.assert_no_warnings(func) # E: Too many arguments +np.testing.assert_no_warnings(func, y=None) # E: No overload variant np.testing.assert_no_gc_cycles(func=func) # E: No overload variant diff --git a/numpy/typing/tests/data/pass/comparisons.py b/numpy/typing/tests/data/pass/comparisons.py index 0babc321b32d..a461d8b660da 100644 --- a/numpy/typing/tests/data/pass/comparisons.py +++ b/numpy/typing/tests/data/pass/comparisons.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Any +from typing import cast, Any import numpy as np c16 = np.complex128() @@ -30,6 +30,9 @@ AR_i: np.ndarray[Any, np.dtype[np.int_]] = np.array([1]) AR_f: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.0]) AR_c: np.ndarray[Any, np.dtype[np.complex128]] = np.array([1.0j]) +AR_S: np.ndarray[Any, np.dtype[np.bytes_]] = np.array([b"a"], "S") +AR_T = cast(np.ndarray[Any, np.dtypes.StringDType], np.array(["a"], "T")) +AR_U: np.ndarray[Any, np.dtype[np.str_]] = np.array(["a"], "U") AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] = np.array([np.timedelta64("1")]) AR_M: np.ndarray[Any, np.dtype[np.datetime64]] = np.array([np.datetime64("1")]) AR_O: np.ndarray[Any, np.dtype[np.object_]] = np.array([1], dtype=object) @@ -66,6 +69,17 @@ AR_c > AR_f AR_c > AR_c +AR_S > AR_S 
+AR_S > b"" + +AR_T > AR_T +AR_T > AR_U +AR_T > "" + +AR_U > AR_U +AR_U > AR_T +AR_U > "" + AR_m > AR_b AR_m > AR_u AR_m > AR_i diff --git a/numpy/typing/tests/data/pass/index_tricks.py b/numpy/typing/tests/data/pass/index_tricks.py index 4c4c1195990a..dfc4ff2f314a 100644 --- a/numpy/typing/tests/data/pass/index_tricks.py +++ b/numpy/typing/tests/data/pass/index_tricks.py @@ -13,10 +13,6 @@ np.ndenumerate(AR_LIKE_f) np.ndenumerate(AR_LIKE_U) -np.ndenumerate(AR_i8).iter -np.ndenumerate(AR_LIKE_f).iter -np.ndenumerate(AR_LIKE_U).iter - next(np.ndenumerate(AR_i8)) next(np.ndenumerate(AR_LIKE_f)) next(np.ndenumerate(AR_LIKE_U)) diff --git a/numpy/typing/tests/data/pass/lib_user_array.py b/numpy/typing/tests/data/pass/lib_user_array.py new file mode 100644 index 000000000000..62b7e85d7ff1 --- /dev/null +++ b/numpy/typing/tests/data/pass/lib_user_array.py @@ -0,0 +1,22 @@ +"""Based on the `if __name__ == "__main__"` test code in `lib/_user_array_impl.py`.""" + +from __future__ import annotations + +import numpy as np +from numpy.lib.user_array import container + +N = 10_000 +W = H = int(N**0.5) + +a: np.ndarray[tuple[int, int], np.dtype[np.int32]] +ua: container[tuple[int, int], np.dtype[np.int32]] + +a = np.arange(N, dtype=np.int32).reshape(W, H) +ua = container(a) + +ua_small: container[tuple[int, int], np.dtype[np.int32]] = ua[:3, :5] +ua_small[0, 0] = 10 + +ua_bool: container[tuple[int, int], np.dtype[np.bool]] = ua_small > 1 + +# shape: tuple[int, int] = np.shape(ua) diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py index 7b8ebea52a16..758626e18dd6 100644 --- a/numpy/typing/tests/data/pass/ndarray_misc.py +++ b/numpy/typing/tests/data/pass/ndarray_misc.py @@ -24,6 +24,8 @@ class SubClass(npt.NDArray[np.float64]): ... 
C: np.ndarray[Any, np.dtype[np.int32]] = np.array([0, 1, 2], dtype=np.int32) D = np.ones(3).view(SubClass) +ctypes_obj = A.ctypes + i4.all() A.all() A.all(axis=0) @@ -174,3 +176,21 @@ class SubClass(npt.NDArray[np.float64]): ... complex(np.array(1.0, dtype=np.float64)) operator.index(np.array(1, dtype=np.int64)) + +# this fails on numpy 2.2.1 +# https://github.com/scipy/scipy/blob/a755ee77ec47a64849abe42c349936475a6c2f24/scipy/io/arff/tests/test_arffread.py#L41-L44 +A_float = np.array([[1, 5], [2, 4], [np.nan, np.nan]]) +A_void: npt.NDArray[np.void] = np.empty(3, [("yop", float), ("yap", float)]) +A_void["yop"] = A_float[:, 0] +A_void["yap"] = A_float[:, 1] + +# deprecated + +with np.testing.assert_warns(DeprecationWarning): + ctypes_obj.get_data() # pyright: ignore[reportDeprecated] +with np.testing.assert_warns(DeprecationWarning): + ctypes_obj.get_shape() # pyright: ignore[reportDeprecated] +with np.testing.assert_warns(DeprecationWarning): + ctypes_obj.get_strides() # pyright: ignore[reportDeprecated] +with np.testing.assert_warns(DeprecationWarning): + ctypes_obj.get_as_parameter() # pyright: ignore[reportDeprecated] diff --git a/numpy/typing/tests/data/pass/nditer.py b/numpy/typing/tests/data/pass/nditer.py new file mode 100644 index 000000000000..25a5b44d7aec --- /dev/null +++ b/numpy/typing/tests/data/pass/nditer.py @@ -0,0 +1,4 @@ +import numpy as np + +arr = np.array([1]) +np.nditer([arr, None]) diff --git a/numpy/typing/tests/data/pass/recfunctions.py b/numpy/typing/tests/data/pass/recfunctions.py new file mode 100644 index 000000000000..03322e064be4 --- /dev/null +++ b/numpy/typing/tests/data/pass/recfunctions.py @@ -0,0 +1,162 @@ +"""These tests are based on the doctests from `numpy/lib/recfunctions.py`.""" + +from typing import Any +from typing_extensions import assert_type + +import numpy as np +import numpy.typing as npt +from numpy.lib import recfunctions as rfn + + +def test_recursive_fill_fields() -> None: + a: npt.NDArray[np.void] = np.array( + 
[(1, 10.0), (2, 20.0)], + dtype=[("A", np.int64), ("B", np.float64)], + ) + b = np.zeros((int(3),), dtype=a.dtype) + out = rfn.recursive_fill_fields(a, b) + assert_type(out, np.ndarray[tuple[int], np.dtype[np.void]]) + + +def test_get_names() -> None: + names: tuple[str | Any, ...] + names = rfn.get_names(np.empty((1,), dtype=[("A", int)]).dtype) + names = rfn.get_names(np.empty((1,), dtype=[("A", int), ("B", float)]).dtype) + + adtype = np.dtype([("a", int), ("b", [("b_a", int), ("b_b", int)])]) + names = rfn.get_names(adtype) + + +def test_get_names_flat() -> None: + names: tuple[str, ...] + names = rfn.get_names_flat(np.empty((1,), dtype=[("A", int)]).dtype) + names = rfn.get_names_flat(np.empty((1,), dtype=[("A", int), ("B", float)]).dtype) + + adtype = np.dtype([("a", int), ("b", [("b_a", int), ("b_b", int)])]) + names = rfn.get_names_flat(adtype) + + +def test_flatten_descr() -> None: + ndtype = np.dtype([("a", " None: + ndtype = np.dtype([ + ("A", int), + ("B", [("B_A", int), ("B_B", [("B_B_A", int), ("B_B_B", int)])]), + ]) + assert_type(rfn.get_fieldstructure(ndtype), dict[str, list[str]]) + + +def test_merge_arrays() -> None: + assert_type( + rfn.merge_arrays(( + np.ones((int(2),), np.int_), + np.ones((int(3),), np.float64), + )), + np.recarray[tuple[int], np.dtype[np.void]], + ) + + +def test_drop_fields() -> None: + ndtype = [("a", np.int64), ("b", [("b_a", np.double), ("b_b", np.int64)])] + a = np.ones((int(3),), dtype=ndtype) + + assert_type( + rfn.drop_fields(a, "a"), + np.ndarray[tuple[int], np.dtype[np.void]], + ) + assert_type( + rfn.drop_fields(a, "a", asrecarray=True), + np.rec.recarray[tuple[int], np.dtype[np.void]], + ) + assert_type( + rfn.rec_drop_fields(a, "a"), + np.rec.recarray[tuple[int], np.dtype[np.void]], + ) + + +def test_rename_fields() -> None: + ndtype = [("a", np.int64), ("b", [("b_a", np.double), ("b_b", np.int64)])] + a = np.ones((int(3),), dtype=ndtype) + + assert_type( + rfn.rename_fields(a, {"a": "A", "b_b": "B_B"}), + 
np.ndarray[tuple[int], np.dtype[np.void]], + ) + + +def test_repack_fields() -> None: + dt: np.dtype[np.void] = np.dtype("u1, None: + a = np.zeros(4, dtype=[("a", "i4"), ("b", "f4,u2"), ("c", "f4", 2)]) + assert_type(rfn.structured_to_unstructured(a), npt.NDArray[Any]) + + +def unstructured_to_structured() -> None: + dt: np.dtype[np.void] = np.dtype([("a", "i4"), ("b", "f4,u2"), ("c", "f4", 2)]) + a = np.arange(20, dtype=np.int32).reshape((4, 5)) + assert_type(rfn.unstructured_to_structured(a, dt), npt.NDArray[np.void]) + + +def test_apply_along_fields() -> None: + b = np.ones(4, dtype=[("x", "i4"), ("y", "f4"), ("z", "f8")]) + assert_type( + rfn.apply_along_fields(np.mean, b), + np.ndarray[tuple[int], np.dtype[np.void]], + ) + + +def test_assign_fields_by_name() -> None: + b = np.ones(4, dtype=[("x", "i4"), ("y", "f4"), ("z", "f8")]) + assert_type( + rfn.apply_along_fields(np.mean, b), + np.ndarray[tuple[int], np.dtype[np.void]], + ) + + +def test_require_fields() -> None: + a = np.ones(4, dtype=[("a", "i4"), ("b", "f8"), ("c", "u1")]) + assert_type( + rfn.require_fields(a, [("b", "f4"), ("c", "u1")]), + np.ndarray[tuple[int], np.dtype[np.void]], + ) + + +def test_stack_arrays() -> None: + x = np.zeros((int(2),), np.int32) + assert_type( + rfn.stack_arrays(x), + np.ndarray[tuple[int], np.dtype[np.int32]], + ) + + z = np.ones((int(2),), [("A", "|S3"), ("B", float)]) + zz = np.ones((int(2),), [("A", "|S3"), ("B", np.float64), ("C", np.float64)]) + assert_type( + rfn.stack_arrays((z, zz)), + np.ma.MaskedArray[tuple[int, ...], np.dtype[np.void]], + ) + + +def test_find_duplicates() -> None: + ndtype = np.dtype([("a", int)]) + + a = np.ma.ones(7, mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) + assert_type(rfn.find_duplicates(a), np.ma.MaskedArray[Any, np.dtype[np.void]]) + assert_type( + rfn.find_duplicates(a, ignoremask=True, return_index=True), + tuple[ + np.ma.MaskedArray[Any, np.dtype[np.void]], + np.ndarray[Any, np.dtype[np.int_]], + ], + ) diff --git 
a/numpy/typing/tests/data/pass/scalars.py b/numpy/typing/tests/data/pass/scalars.py index 01beb0b29f52..89f24cb92991 100644 --- a/numpy/typing/tests/data/pass/scalars.py +++ b/numpy/typing/tests/data/pass/scalars.py @@ -89,9 +89,18 @@ def __float__(self) -> float: np.datetime64("2019") np.datetime64(b"2019") np.datetime64("2019", "D") +np.datetime64("2019", "us") +np.datetime64("2019", "as") +np.datetime64(np.datetime64()) np.datetime64(np.datetime64()) np.datetime64(dt.datetime(2000, 5, 3)) +np.datetime64(dt.datetime(2000, 5, 3), "D") +np.datetime64(dt.datetime(2000, 5, 3), "us") +np.datetime64(dt.datetime(2000, 5, 3), "as") np.datetime64(dt.date(2000, 5, 3)) +np.datetime64(dt.date(2000, 5, 3), "D") +np.datetime64(dt.date(2000, 5, 3), "us") +np.datetime64(dt.date(2000, 5, 3), "as") np.datetime64(None) np.datetime64(None, "D") diff --git a/numpy/typing/tests/data/pass/simple.py b/numpy/typing/tests/data/pass/simple.py index 16c6e8eb5de5..8f44e6e76f83 100644 --- a/numpy/typing/tests/data/pass/simple.py +++ b/numpy/typing/tests/data/pass/simple.py @@ -71,8 +71,13 @@ def iterable_func(x: Iterable[object]) -> Iterable[object]: array_2d = np.ones((3, 3)) array_2d[:2, :2] -array_2d[..., 0] array_2d[:2, :2] = 0 +array_2d[..., 0] +array_2d[..., 0] = 2 +array_2d[-1, -1] = None + +array_obj = np.zeros(1, dtype=np.object_) +array_obj[0] = slice(None) # Other special methods len(array) @@ -80,8 +85,7 @@ def iterable_func(x: Iterable[object]) -> Iterable[object]: array_scalar = np.array(1) int(array_scalar) float(array_scalar) -# currently does not work due to https://github.com/python/typeshed/issues/1904 -# complex(array_scalar) +complex(array_scalar) bytes(array_scalar) operator.index(array_scalar) bool(array_scalar) diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index c1eee5d3fc29..c0b94bae08a1 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -50,7 
+50,9 @@ AR_c: npt.NDArray[np.complex128] AR_m: npt.NDArray[np.timedelta64] AR_M: npt.NDArray[np.datetime64] AR_O: npt.NDArray[np.object_] -AR_number: npt.NDArray[np.number[Any]] +AR_floating: npt.NDArray[np.floating] +AR_number: npt.NDArray[np.number] +AR_Any: npt.NDArray[Any] AR_LIKE_b: list[bool] AR_LIKE_u: list[np.uint32] @@ -61,18 +63,19 @@ AR_LIKE_m: list[np.timedelta64] AR_LIKE_M: list[np.datetime64] AR_LIKE_O: list[np.object_] + # Array subtraction assert_type(AR_number - AR_number, npt.NDArray[np.number[Any]]) -assert_type(AR_b - AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_b - AR_LIKE_u, npt.NDArray[np.uint32]) assert_type(AR_b - AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_b - AR_LIKE_f, npt.NDArray[np.floating[Any]]) assert_type(AR_b - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) assert_type(AR_b - AR_LIKE_m, npt.NDArray[np.timedelta64]) assert_type(AR_b - AR_LIKE_O, Any) -assert_type(AR_LIKE_u - AR_b, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_LIKE_u - AR_b, npt.NDArray[np.uint32]) assert_type(AR_LIKE_i - AR_b, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_LIKE_f - AR_b, npt.NDArray[np.floating[Any]]) assert_type(AR_LIKE_c - AR_b, npt.NDArray[np.complexfloating[Any, Any]]) @@ -80,7 +83,7 @@ assert_type(AR_LIKE_m - AR_b, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_M - AR_b, npt.NDArray[np.datetime64]) assert_type(AR_LIKE_O - AR_b, Any) -assert_type(AR_u - AR_LIKE_b, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_u - AR_LIKE_b, npt.NDArray[np.uint32]) assert_type(AR_u - AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]]) assert_type(AR_u - AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_u - AR_LIKE_f, npt.NDArray[np.floating[Any]]) @@ -88,7 +91,7 @@ assert_type(AR_u - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) assert_type(AR_u - AR_LIKE_m, npt.NDArray[np.timedelta64]) assert_type(AR_u - AR_LIKE_O, Any) -assert_type(AR_LIKE_b - AR_u, 
npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_LIKE_b - AR_u, npt.NDArray[np.uint32]) assert_type(AR_LIKE_u - AR_u, npt.NDArray[np.unsignedinteger[Any]]) assert_type(AR_LIKE_i - AR_u, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_LIKE_f - AR_u, npt.NDArray[np.floating[Any]]) @@ -97,7 +100,7 @@ assert_type(AR_LIKE_m - AR_u, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_M - AR_u, npt.NDArray[np.datetime64]) assert_type(AR_LIKE_O - AR_u, Any) -assert_type(AR_i - AR_LIKE_b, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_i - AR_LIKE_b, npt.NDArray[np.int64]) assert_type(AR_i - AR_LIKE_u, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_i - AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_i - AR_LIKE_f, npt.NDArray[np.floating[Any]]) @@ -105,7 +108,7 @@ assert_type(AR_i - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) assert_type(AR_i - AR_LIKE_m, npt.NDArray[np.timedelta64]) assert_type(AR_i - AR_LIKE_O, Any) -assert_type(AR_LIKE_b - AR_i, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_LIKE_b - AR_i, npt.NDArray[np.int64]) assert_type(AR_LIKE_u - AR_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_LIKE_i - AR_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_LIKE_f - AR_i, npt.NDArray[np.floating[Any]]) @@ -114,32 +117,32 @@ assert_type(AR_LIKE_m - AR_i, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_M - AR_i, npt.NDArray[np.datetime64]) assert_type(AR_LIKE_O - AR_i, Any) -assert_type(AR_f - AR_LIKE_b, npt.NDArray[np.floating[Any]]) -assert_type(AR_f - AR_LIKE_u, npt.NDArray[np.floating[Any]]) -assert_type(AR_f - AR_LIKE_i, npt.NDArray[np.floating[Any]]) -assert_type(AR_f - AR_LIKE_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_f - AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_f - AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_f - AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_f - AR_LIKE_f, npt.NDArray[np.float64]) assert_type(AR_f - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) 
assert_type(AR_f - AR_LIKE_O, Any) -assert_type(AR_LIKE_b - AR_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_u - AR_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_i - AR_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_f - AR_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_b - AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u - AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i - AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f - AR_f, npt.NDArray[np.float64]) assert_type(AR_LIKE_c - AR_f, npt.NDArray[np.complexfloating[Any, Any]]) assert_type(AR_LIKE_O - AR_f, Any) -assert_type(AR_c - AR_LIKE_b, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_c - AR_LIKE_u, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_c - AR_LIKE_i, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_c - AR_LIKE_f, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_c - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_c - AR_LIKE_b, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_u, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_i, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_f, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_c, npt.NDArray[np.complex128]) assert_type(AR_c - AR_LIKE_O, Any) -assert_type(AR_LIKE_b - AR_c, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_LIKE_u - AR_c, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_LIKE_i - AR_c, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_LIKE_f - AR_c, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_LIKE_c - AR_c, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_LIKE_b - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_u - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_i - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_f - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_c - AR_c, npt.NDArray[np.complex128]) 
assert_type(AR_LIKE_O - AR_c, Any) assert_type(AR_m - AR_LIKE_b, npt.NDArray[np.timedelta64]) @@ -183,56 +186,142 @@ assert_type(AR_LIKE_m - AR_O, Any) assert_type(AR_LIKE_M - AR_O, Any) assert_type(AR_LIKE_O - AR_O, Any) +# Array "true" division + +assert_type(AR_f / b, npt.NDArray[np.float64]) +assert_type(AR_f / i, npt.NDArray[np.float64]) +assert_type(AR_f / f, npt.NDArray[np.float64]) + +assert_type(b / AR_f, npt.NDArray[np.float64]) +assert_type(i / AR_f, npt.NDArray[np.float64]) +assert_type(f / AR_f, npt.NDArray[np.float64]) + +assert_type(AR_b / AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_b / AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_b / AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_b / AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_b / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_b, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u / AR_b, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i / AR_b, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f / AR_b, npt.NDArray[np.float64]) +assert_type(AR_LIKE_O / AR_b, Any) + +assert_type(AR_u / AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_u / AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_u / AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_u / AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_u / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_u, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u / AR_u, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i / AR_u, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f / AR_u, npt.NDArray[np.float64]) +assert_type(AR_LIKE_m / AR_u, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O / AR_u, Any) + +assert_type(AR_i / AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_i / AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_i / AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_i / AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_i / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_i, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u / AR_i, 
npt.NDArray[np.float64]) +assert_type(AR_LIKE_i / AR_i, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f / AR_i, npt.NDArray[np.float64]) +assert_type(AR_LIKE_m / AR_i, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O / AR_i, Any) + +assert_type(AR_f / AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_f / AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_f / AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_f / AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_f / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u / AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i / AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f / AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_m / AR_f, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O / AR_f, Any) + +assert_type(AR_m / AR_LIKE_u, npt.NDArray[np.timedelta64]) +assert_type(AR_m / AR_LIKE_i, npt.NDArray[np.timedelta64]) +assert_type(AR_m / AR_LIKE_f, npt.NDArray[np.timedelta64]) +assert_type(AR_m / AR_LIKE_m, npt.NDArray[np.float64]) +assert_type(AR_m / AR_LIKE_O, Any) + +assert_type(AR_LIKE_m / AR_m, npt.NDArray[np.float64]) +assert_type(AR_LIKE_O / AR_m, Any) + +assert_type(AR_O / AR_LIKE_b, Any) +assert_type(AR_O / AR_LIKE_u, Any) +assert_type(AR_O / AR_LIKE_i, Any) +assert_type(AR_O / AR_LIKE_f, Any) +assert_type(AR_O / AR_LIKE_m, Any) +assert_type(AR_O / AR_LIKE_M, Any) +assert_type(AR_O / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_O, Any) +assert_type(AR_LIKE_u / AR_O, Any) +assert_type(AR_LIKE_i / AR_O, Any) +assert_type(AR_LIKE_f / AR_O, Any) +assert_type(AR_LIKE_m / AR_O, Any) +assert_type(AR_LIKE_M / AR_O, Any) +assert_type(AR_LIKE_O / AR_O, Any) + # Array floor division assert_type(AR_b // AR_LIKE_b, npt.NDArray[np.int8]) -assert_type(AR_b // AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_b // AR_LIKE_u, npt.NDArray[np.uint32]) assert_type(AR_b // AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_b // AR_LIKE_f, 
npt.NDArray[np.floating[Any]]) assert_type(AR_b // AR_LIKE_O, Any) assert_type(AR_LIKE_b // AR_b, npt.NDArray[np.int8]) -assert_type(AR_LIKE_u // AR_b, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_LIKE_u // AR_b, npt.NDArray[np.uint32]) assert_type(AR_LIKE_i // AR_b, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_LIKE_f // AR_b, npt.NDArray[np.floating[Any]]) assert_type(AR_LIKE_O // AR_b, Any) -assert_type(AR_u // AR_LIKE_b, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_u // AR_LIKE_b, npt.NDArray[np.uint32]) assert_type(AR_u // AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]]) assert_type(AR_u // AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_u // AR_LIKE_f, npt.NDArray[np.floating[Any]]) assert_type(AR_u // AR_LIKE_O, Any) -assert_type(AR_LIKE_b // AR_u, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_LIKE_b // AR_u, npt.NDArray[np.uint32]) assert_type(AR_LIKE_u // AR_u, npt.NDArray[np.unsignedinteger[Any]]) assert_type(AR_LIKE_i // AR_u, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_LIKE_f // AR_u, npt.NDArray[np.floating[Any]]) assert_type(AR_LIKE_m // AR_u, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_O // AR_u, Any) -assert_type(AR_i // AR_LIKE_b, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_i // AR_LIKE_b, npt.NDArray[np.int64]) assert_type(AR_i // AR_LIKE_u, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_i // AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_i // AR_LIKE_f, npt.NDArray[np.floating[Any]]) assert_type(AR_i // AR_LIKE_O, Any) -assert_type(AR_LIKE_b // AR_i, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_LIKE_b // AR_i, npt.NDArray[np.int64]) assert_type(AR_LIKE_u // AR_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_LIKE_i // AR_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_LIKE_f // AR_i, npt.NDArray[np.floating[Any]]) assert_type(AR_LIKE_m // AR_i, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_O // AR_i, Any) -assert_type(AR_f // AR_LIKE_b, 
npt.NDArray[np.floating[Any]]) -assert_type(AR_f // AR_LIKE_u, npt.NDArray[np.floating[Any]]) -assert_type(AR_f // AR_LIKE_i, npt.NDArray[np.floating[Any]]) -assert_type(AR_f // AR_LIKE_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_f // AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_f // AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_f // AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_f // AR_LIKE_f, npt.NDArray[np.float64]) assert_type(AR_f // AR_LIKE_O, Any) -assert_type(AR_LIKE_b // AR_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_u // AR_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_i // AR_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_f // AR_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_b // AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u // AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i // AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f // AR_f, npt.NDArray[np.float64]) assert_type(AR_LIKE_m // AR_f, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_O // AR_f, Any) @@ -306,6 +395,7 @@ assert_type(abs(m8_none), np.timedelta64[None]) assert_type(abs(m8_int), np.timedelta64[int]) assert_type(abs(m8_delta), np.timedelta64[dt.timedelta]) assert_type(abs(b_), np.bool) +assert_type(abs(AR_O), npt.NDArray[np.object_]) # Time structures @@ -407,20 +497,20 @@ assert_type(c16 + b_, np.complex128) assert_type(c16 + b, np.complex128) assert_type(c16 + c, np.complex128) assert_type(c16 + f, np.complex128) -assert_type(c16 + AR_f, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(c16 + AR_f, npt.NDArray[np.complex128]) assert_type(f16 + c16, np.complex128 | np.complexfloating[_128Bit, _128Bit]) assert_type(c16 + c16, np.complex128) assert_type(f8 + c16, np.complex128) -assert_type(i8 + c16, np.complexfloating[_64Bit, _64Bit]) +assert_type(i8 + c16, np.complex128) assert_type(c8 + c16, np.complex128 | np.complex64) assert_type(f4 + c16, np.complex128 | np.complex64) -assert_type(i4 + c16, 
np.complex128 | np.complex64) +assert_type(i4 + c16, np.complex128) assert_type(b_ + c16, np.complex128) assert_type(b + c16, np.complex128) assert_type(c + c16, np.complex128) assert_type(f + c16, np.complex128) -assert_type(AR_f + c16, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_f + c16, npt.NDArray[np.complex128]) assert_type(c8 + f16, np.complexfloating[_32Bit, _32Bit] | np.complexfloating[_128Bit, _128Bit]) assert_type(c8 + c16, np.complex64 | np.complex128) @@ -433,7 +523,7 @@ assert_type(c8 + b_, np.complex64) assert_type(c8 + b, np.complex64) assert_type(c8 + c, np.complex64 | np.complex128) assert_type(c8 + f, np.complex64 | np.complex128) -assert_type(c8 + AR_f, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(c8 + AR_f, npt.NDArray[np.complexfloating]) assert_type(f16 + c8, np.complexfloating[_128Bit, _128Bit] | np.complex64) assert_type(c16 + c8, np.complex128) @@ -446,7 +536,7 @@ assert_type(b_ + c8, np.complex64) assert_type(b + c8, np.complex64) assert_type(c + c8, np.complex64 | np.complex128) assert_type(f + c8, np.complex64 | np.complex128) -assert_type(AR_f + c8, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_f + c8, npt.NDArray[np.complexfloating]) # Float @@ -459,18 +549,18 @@ assert_type(f8 + b_, np.float64) assert_type(f8 + b, np.float64) assert_type(f8 + c, np.float64 | np.complex128) assert_type(f8 + f, np.float64) -assert_type(f8 + AR_f, npt.NDArray[np.floating[Any]]) +assert_type(f8 + AR_f, npt.NDArray[np.float64]) assert_type(f16 + f8, np.floating[_128Bit] | np.float64) assert_type(f8 + f8, np.float64) -assert_type(i8 + f8, np.floating[_64Bit]) -assert_type(f4 + f8, np.floating[_32Bit] | np.float64) -assert_type(i4 + f8, np.floating[_32Bit] | np.float64) +assert_type(i8 + f8, np.float64) +assert_type(f4 + f8, np.float32 | np.float64) +assert_type(i4 + f8,np.float64) assert_type(b_ + f8, np.float64) assert_type(b + f8, np.float64) assert_type(c + f8, np.complex128 | np.float64) assert_type(f + f8, 
np.float64) -assert_type(AR_f + f8, npt.NDArray[np.floating[Any]]) +assert_type(AR_f + f8, npt.NDArray[np.float64]) assert_type(f4 + f16, np.float32 | np.floating[_128Bit]) assert_type(f4 + f8, np.float32 | np.float64) @@ -481,7 +571,7 @@ assert_type(f4 + b_, np.float32) assert_type(f4 + b, np.float32) assert_type(f4 + c, np.complex64 | np.complex128) assert_type(f4 + f, np.float32 | np.float64) -assert_type(f4 + AR_f, npt.NDArray[np.floating[Any]]) +assert_type(f4 + AR_f, npt.NDArray[np.float64]) assert_type(f16 + f4, np.floating[_128Bit] | np.float32) assert_type(f8 + f4, np.float64) @@ -492,7 +582,7 @@ assert_type(b_ + f4, np.float32) assert_type(b + f4, np.float32) assert_type(c + f4, np.complex64 | np.complex128) assert_type(f + f4, np.float64 | np.float32) -assert_type(AR_f + f4, npt.NDArray[np.floating[Any]]) +assert_type(AR_f + f4, npt.NDArray[np.float64]) # Int @@ -502,18 +592,18 @@ assert_type(i8 + i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) assert_type(i8 + u4, Any) assert_type(i8 + b_, np.int64) assert_type(i8 + b, np.int64) -assert_type(i8 + c, np.complexfloating[_64Bit, _64Bit]) -assert_type(i8 + f, np.floating[_64Bit]) -assert_type(i8 + AR_f, npt.NDArray[np.floating[Any]]) +assert_type(i8 + c, np.complex128) +assert_type(i8 + f, np.float64) +assert_type(i8 + AR_f, npt.NDArray[np.float64]) assert_type(u8 + u8, np.uint64) assert_type(u8 + i4, Any) assert_type(u8 + u4, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) assert_type(u8 + b_, np.uint64) assert_type(u8 + b, np.uint64) -assert_type(u8 + c, np.complexfloating[_64Bit, _64Bit]) -assert_type(u8 + f, np.floating[_64Bit]) -assert_type(u8 + AR_f, npt.NDArray[np.floating[Any]]) +assert_type(u8 + c, np.complex128) +assert_type(u8 + f, np.float64) +assert_type(u8 + AR_f, npt.NDArray[np.float64]) assert_type(i8 + i8, np.int64) assert_type(u8 + i8, Any) @@ -521,24 +611,24 @@ assert_type(i4 + i8, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) assert_type(u4 + i8, Any) 
assert_type(b_ + i8, np.int64) assert_type(b + i8, np.int64) -assert_type(c + i8, np.complexfloating[_64Bit, _64Bit]) -assert_type(f + i8, np.floating[_64Bit]) -assert_type(AR_f + i8, npt.NDArray[np.floating[Any]]) +assert_type(c + i8, np.complex128) +assert_type(f + i8, np.float64) +assert_type(AR_f + i8, npt.NDArray[np.float64]) assert_type(u8 + u8, np.uint64) assert_type(i4 + u8, Any) assert_type(u4 + u8, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) assert_type(b_ + u8, np.uint64) assert_type(b + u8, np.uint64) -assert_type(c + u8, np.complexfloating[_64Bit, _64Bit]) -assert_type(f + u8, np.floating[_64Bit]) -assert_type(AR_f + u8, npt.NDArray[np.floating[Any]]) +assert_type(c + u8, np.complex128) +assert_type(f + u8, np.float64) +assert_type(AR_f + u8, npt.NDArray[np.float64]) assert_type(i4 + i8, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) assert_type(i4 + i4, np.int32) assert_type(i4 + b_, np.int32) assert_type(i4 + b, np.int32) -assert_type(i4 + AR_f, npt.NDArray[np.floating[Any]]) +assert_type(i4 + AR_f, npt.NDArray[np.float64]) assert_type(u4 + i8, Any) assert_type(u4 + i4, Any) @@ -546,13 +636,13 @@ assert_type(u4 + u8, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) assert_type(u4 + u4, np.uint32) assert_type(u4 + b_, np.uint32) assert_type(u4 + b, np.uint32) -assert_type(u4 + AR_f, npt.NDArray[np.floating[Any]]) +assert_type(u4 + AR_f, npt.NDArray[np.float64]) assert_type(i8 + i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) assert_type(i4 + i4, np.int32) assert_type(b_ + i4, np.int32) assert_type(b + i4, np.int32) -assert_type(AR_f + i4, npt.NDArray[np.floating[Any]]) +assert_type(AR_f + i4, npt.NDArray[np.float64]) assert_type(i8 + u4, Any) assert_type(i4 + u4, Any) @@ -560,4 +650,28 @@ assert_type(u8 + u4, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) assert_type(u4 + u4, np.uint32) assert_type(b_ + u4, np.uint32) assert_type(b + u4, np.uint32) -assert_type(AR_f + u4, npt.NDArray[np.floating[Any]]) 
+assert_type(AR_f + u4, npt.NDArray[np.float64]) + +# Any + +assert_type(AR_Any + 2, npt.NDArray[Any]) + +# regression tests for https://github.com/numpy/numpy/issues/28805 + +assert_type(AR_floating + f, npt.NDArray[np.floating]) +assert_type(AR_floating - f, npt.NDArray[np.floating]) +assert_type(AR_floating * f, npt.NDArray[np.floating]) +assert_type(AR_floating ** f, npt.NDArray[np.floating]) +assert_type(AR_floating / f, npt.NDArray[np.floating]) +assert_type(AR_floating // f, npt.NDArray[np.floating]) +assert_type(AR_floating % f, npt.NDArray[np.floating]) +assert_type(divmod(AR_floating, f), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) + +assert_type(f + AR_floating, npt.NDArray[np.floating]) +assert_type(f - AR_floating, npt.NDArray[np.floating]) +assert_type(f * AR_floating, npt.NDArray[np.floating]) +assert_type(f ** AR_floating, npt.NDArray[np.floating]) +assert_type(f / AR_floating, npt.NDArray[np.floating]) +assert_type(f // AR_floating, npt.NDArray[np.floating]) +assert_type(f % AR_floating, npt.NDArray[np.floating]) +assert_type(divmod(f, AR_floating), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index c6d56ab0de2d..35861cc0e942 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -203,26 +203,30 @@ assert_type(np.identity(10, dtype=int), npt.NDArray[Any]) assert_type(np.atleast_1d(A), npt.NDArray[np.float64]) assert_type(np.atleast_1d(C), npt.NDArray[Any]) -assert_type(np.atleast_1d(A, A), tuple[npt.NDArray[Any], ...]) -assert_type(np.atleast_1d(A, C), tuple[npt.NDArray[Any], ...]) -assert_type(np.atleast_1d(C, C), tuple[npt.NDArray[Any], ...]) +assert_type(np.atleast_1d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.atleast_1d(A, C), tuple[npt.NDArray[Any], npt.NDArray[Any]]) 
+assert_type(np.atleast_1d(C, C), tuple[npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.atleast_1d(A, A, A), tuple[npt.NDArray[np.float64], ...]) +assert_type(np.atleast_1d(C, C, C), tuple[npt.NDArray[Any], ...]) assert_type(np.atleast_2d(A), npt.NDArray[np.float64]) -assert_type(np.atleast_2d(A, A), tuple[npt.NDArray[Any], ...]) +assert_type(np.atleast_2d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.atleast_2d(A, A, A), tuple[npt.NDArray[np.float64], ...]) assert_type(np.atleast_3d(A), npt.NDArray[np.float64]) -assert_type(np.atleast_3d(A, A), tuple[npt.NDArray[Any], ...]) +assert_type(np.atleast_3d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.atleast_3d(A, A, A), tuple[npt.NDArray[np.float64], ...]) assert_type(np.vstack([A, A]), npt.NDArray[np.float64]) -assert_type(np.vstack([A, A], dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.vstack([A, A], dtype=np.float32), npt.NDArray[np.float32]) assert_type(np.vstack([A, C]), npt.NDArray[Any]) assert_type(np.vstack([C, C]), npt.NDArray[Any]) assert_type(np.hstack([A, A]), npt.NDArray[np.float64]) -assert_type(np.hstack([A, A], dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.hstack([A, A], dtype=np.float32), npt.NDArray[np.float32]) assert_type(np.stack([A, A]), npt.NDArray[np.float64]) -assert_type(np.stack([A, A], dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.stack([A, A], dtype=np.float32), npt.NDArray[np.float32]) assert_type(np.stack([A, C]), npt.NDArray[Any]) assert_type(np.stack([C, C]), npt.NDArray[Any]) assert_type(np.stack([A, A], axis=0), npt.NDArray[np.float64]) diff --git a/numpy/typing/tests/data/reveal/arraysetops.pyi b/numpy/typing/tests/data/reveal/arraysetops.pyi index 33793f8deebc..eabc7677cde9 100644 --- a/numpy/typing/tests/data/reveal/arraysetops.pyi +++ b/numpy/typing/tests/data/reveal/arraysetops.pyi @@ -2,10 +2,7 @@ from typing import Any import numpy as np import numpy.typing as npt 
-from numpy.lib._arraysetops_impl import ( - UniqueAllResult, UniqueCountsResult, UniqueInverseResult -) -from numpy._typing import _64Bit +from numpy.lib._arraysetops_impl import UniqueAllResult, UniqueCountsResult, UniqueInverseResult from typing_extensions import assert_type @@ -28,7 +25,7 @@ assert_type(np.intersect1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datet assert_type(np.intersect1d(AR_f8, AR_i8), npt.NDArray[Any]) assert_type( np.intersect1d(AR_f8, AR_f8, return_indices=True), - tuple[npt.NDArray[np.floating[_64Bit]], npt.NDArray[np.intp], npt.NDArray[np.intp]], + tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]], ) assert_type(np.setxor1d(AR_i8, AR_i8), npt.NDArray[np.int64]) diff --git a/numpy/typing/tests/data/reveal/dtype.pyi b/numpy/typing/tests/data/reveal/dtype.pyi index 4cd6d4a11aff..da37778b177b 100644 --- a/numpy/typing/tests/data/reveal/dtype.pyi +++ b/numpy/typing/tests/data/reveal/dtype.pyi @@ -7,7 +7,7 @@ from typing import Any, Literal, TypeAlias import numpy as np from numpy.dtypes import StringDType -from typing_extensions import assert_type +from typing_extensions import LiteralString, assert_type # a combination of likely `object` dtype-like candidates (no `_co`) _PyObjectLike: TypeAlias = Decimal | Fraction | dt.datetime | dt.timedelta @@ -71,6 +71,8 @@ assert_type(np.dtype(Decimal), np.dtype[np.object_]) assert_type(np.dtype(Fraction), np.dtype[np.object_]) # char-codes +assert_type(np.dtype("?"), np.dtype[np.bool]) +assert_type(np.dtype("|b1"), np.dtype[np.bool]) assert_type(np.dtype("u1"), np.dtype[np.uint8]) assert_type(np.dtype("l"), np.dtype[np.long]) assert_type(np.dtype("longlong"), np.dtype[np.longlong]) @@ -113,7 +115,7 @@ assert_type(dtype_U.base, np.dtype[Any]) assert_type(dtype_U.subdtype, None | tuple[np.dtype[Any], tuple[int, ...]]) assert_type(dtype_U.newbyteorder(), np.dtype[np.str_]) assert_type(dtype_U.type, type[np.str_]) -assert_type(dtype_U.name, str) +assert_type(dtype_U.name, 
LiteralString) assert_type(dtype_U.names, None | tuple[str, ...]) assert_type(dtype_U * 0, np.dtype[np.str_]) diff --git a/numpy/typing/tests/data/reveal/false_positives.pyi b/numpy/typing/tests/data/reveal/false_positives.pyi deleted file mode 100644 index 7ae95e16a720..000000000000 --- a/numpy/typing/tests/data/reveal/false_positives.pyi +++ /dev/null @@ -1,14 +0,0 @@ -from typing import Any - -import numpy as np -import numpy.typing as npt - -from typing_extensions import assert_type - -AR_Any: npt.NDArray[Any] - -# Mypy bug where overload ambiguity is ignored for `Any`-parametrized types; -# xref numpy/numpy#20099 and python/mypy#11347 -# -# The expected output would be something akin to `npt.NDArray[Any]` -assert_type(AR_Any + 2, npt.NDArray[np.signedinteger[Any]]) diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 40bb578d0d46..7e778dc58410 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -1,6 +1,6 @@ """Tests for :mod:`_core.fromnumeric`.""" -from typing import Any, Literal as L, NoReturn +from typing import Any, Literal as L import numpy as np import numpy.typing as npt @@ -102,11 +102,11 @@ assert_type(np.searchsorted(AR_f4[0], 0), np.intp) assert_type(np.searchsorted(AR_b[0], [0]), npt.NDArray[np.intp]) assert_type(np.searchsorted(AR_f4[0], [0]), npt.NDArray[np.intp]) -assert_type(np.resize(b, (5, 5)), np.ndarray[tuple[L[5], L[5]], np.dtype[np.bool]]) -assert_type(np.resize(f4, (5, 5)), np.ndarray[tuple[L[5], L[5]], np.dtype[np.float32]]) -assert_type(np.resize(f, (5, 5)), np.ndarray[tuple[L[5], L[5]], np.dtype[Any]]) -assert_type(np.resize(AR_b, (5, 5)), np.ndarray[tuple[L[5], L[5]], np.dtype[np.bool]]) -assert_type(np.resize(AR_f4, (5, 5)), np.ndarray[tuple[L[5], L[5]], np.dtype[np.float32]]) +assert_type(np.resize(b, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.resize(f4, (5, 5)), 
np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.resize(f, (5, 5)), np.ndarray[tuple[int, int], np.dtype[Any]]) +assert_type(np.resize(AR_b, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.resize(AR_f4, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.float32]]) assert_type(np.squeeze(b), np.bool) assert_type(np.squeeze(f4), np.float32) @@ -127,11 +127,8 @@ assert_type(np.ravel(f), np.ndarray[tuple[int], np.dtype[np.float64 | np.int_ | assert_type(np.ravel(AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.ravel(AR_f4), np.ndarray[tuple[int], np.dtype[np.float32]]) -assert_type(np.nonzero(b), NoReturn) -assert_type(np.nonzero(f4), NoReturn) assert_type(np.nonzero(AR_b), tuple[npt.NDArray[np.intp], ...]) assert_type(np.nonzero(AR_f4), tuple[npt.NDArray[np.intp], ...]) -assert_type(np.nonzero(AR_0d), NoReturn) assert_type(np.nonzero(AR_1d), tuple[npt.NDArray[np.intp], ...]) assert_type(np.nonzero(AR_nd), tuple[npt.NDArray[np.intp], ...]) diff --git a/numpy/typing/tests/data/reveal/index_tricks.pyi b/numpy/typing/tests/data/reveal/index_tricks.pyi index 7f5dcf8ccc3e..06071feddd79 100644 --- a/numpy/typing/tests/data/reveal/index_tricks.pyi +++ b/numpy/typing/tests/data/reveal/index_tricks.pyi @@ -18,23 +18,17 @@ AR_O: npt.NDArray[np.object_] assert_type(np.ndenumerate(AR_i8), np.ndenumerate[np.int64]) assert_type(np.ndenumerate(AR_LIKE_f), np.ndenumerate[np.float64]) assert_type(np.ndenumerate(AR_LIKE_U), np.ndenumerate[np.str_]) -assert_type(np.ndenumerate(AR_LIKE_O), np.ndenumerate[np.object_]) - -assert_type(np.ndenumerate(AR_i8).iter, np.flatiter[npt.NDArray[np.int64]]) -assert_type(np.ndenumerate(AR_LIKE_f).iter, np.flatiter[npt.NDArray[np.float64]]) -assert_type(np.ndenumerate(AR_LIKE_U).iter, np.flatiter[npt.NDArray[np.str_]]) -assert_type(np.ndenumerate(AR_LIKE_O).iter, np.flatiter[npt.NDArray[np.object_]]) +assert_type(np.ndenumerate(AR_LIKE_O), np.ndenumerate[Any]) 
assert_type(next(np.ndenumerate(AR_i8)), tuple[tuple[int, ...], np.int64]) assert_type(next(np.ndenumerate(AR_LIKE_f)), tuple[tuple[int, ...], np.float64]) assert_type(next(np.ndenumerate(AR_LIKE_U)), tuple[tuple[int, ...], np.str_]) -# this fails due to an unknown mypy bug -# assert_type(next(np.ndenumerate(AR_LIKE_O)), tuple[tuple[int, ...], Any]) +assert_type(next(np.ndenumerate(AR_LIKE_O)), tuple[tuple[int, ...], Any]) assert_type(iter(np.ndenumerate(AR_i8)), np.ndenumerate[np.int64]) assert_type(iter(np.ndenumerate(AR_LIKE_f)), np.ndenumerate[np.float64]) assert_type(iter(np.ndenumerate(AR_LIKE_U)), np.ndenumerate[np.str_]) -assert_type(iter(np.ndenumerate(AR_LIKE_O)), np.ndenumerate[np.object_]) +assert_type(iter(np.ndenumerate(AR_LIKE_O)), np.ndenumerate[Any]) assert_type(np.ndindex(1, 2, 3), np.ndindex) assert_type(np.ndindex((1, 2, 3)), np.ndindex) @@ -58,13 +52,13 @@ assert_type(np.mgrid[1:1:2, None:10], npt.NDArray[Any]) assert_type(np.ogrid[1:1:2], tuple[npt.NDArray[Any], ...]) assert_type(np.ogrid[1:1:2, None:10], tuple[npt.NDArray[Any], ...]) -assert_type(np.index_exp[0:1], tuple[slice]) -assert_type(np.index_exp[0:1, None:3], tuple[slice, slice]) -assert_type(np.index_exp[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice, EllipsisType, list[int]]) +assert_type(np.index_exp[0:1], tuple[slice[int, int, None]]) +assert_type(np.index_exp[0:1, None:3], tuple[slice[int, int, None], slice[None, int, None]]) +assert_type(np.index_exp[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice[int, int, None], EllipsisType, list[int]]) -assert_type(np.s_[0:1], slice) -assert_type(np.s_[0:1, None:3], tuple[slice, slice]) -assert_type(np.s_[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice, EllipsisType, list[int]]) +assert_type(np.s_[0:1], slice[int, int, None]) +assert_type(np.s_[0:1, None:3], tuple[slice[int, int, None], slice[None, int, None]]) +assert_type(np.s_[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice[int, int, None], EllipsisType, list[int]]) 
assert_type(np.ix_(AR_LIKE_b), tuple[npt.NDArray[np.bool], ...]) assert_type(np.ix_(AR_LIKE_i, AR_LIKE_f), tuple[npt.NDArray[np.float64], ...]) diff --git a/numpy/typing/tests/data/reveal/lib_function_base.pyi b/numpy/typing/tests/data/reveal/lib_function_base.pyi index 6267163e4280..9cd06a36f3e0 100644 --- a/numpy/typing/tests/data/reveal/lib_function_base.pyi +++ b/numpy/typing/tests/data/reveal/lib_function_base.pyi @@ -94,6 +94,15 @@ assert_type(np.diff("bob", n=0), str) assert_type(np.diff(AR_f8, axis=0), npt.NDArray[Any]) assert_type(np.diff(AR_LIKE_f8, prepend=1.5), npt.NDArray[Any]) +assert_type(np.interp(1, [1], AR_f8), np.float64) +assert_type(np.interp(1, [1], [1]), np.float64) +assert_type(np.interp(1, [1], AR_c16), np.complex128) +assert_type(np.interp(1, [1], [1j]), np.complex128) # pyright correctly infers `complex128 | float64` +assert_type(np.interp([1], [1], AR_f8), npt.NDArray[np.float64]) +assert_type(np.interp([1], [1], [1]), npt.NDArray[np.float64]) +assert_type(np.interp([1], [1], AR_c16), npt.NDArray[np.complex128]) +assert_type(np.interp([1], [1], [1j]), npt.NDArray[np.complex128]) # pyright correctly infers `NDArray[complex128 | float64]` + assert_type(np.angle(f8), np.floating[Any]) assert_type(np.angle(AR_f8), npt.NDArray[np.floating[Any]]) assert_type(np.angle(AR_c16, deg=True), npt.NDArray[np.floating[Any]]) diff --git a/numpy/typing/tests/data/reveal/mod.pyi b/numpy/typing/tests/data/reveal/mod.pyi index e7e6082753be..db79504fdd1f 100644 --- a/numpy/typing/tests/data/reveal/mod.pyi +++ b/numpy/typing/tests/data/reveal/mod.pyi @@ -1,39 +1,71 @@ import datetime as dt -from typing import Any +from typing import Literal as L + +from typing_extensions import assert_type import numpy as np import numpy.typing as npt -from numpy._typing import _32Bit, _64Bit +from numpy._typing import _64Bit -from typing_extensions import assert_type +f8: np.float64 +i8: np.int64 +u8: np.uint64 -f8 = np.float64() -i8 = np.int64() -u8 = np.uint64() +f4: 
np.float32 +i4: np.int32 +u4: np.uint32 -f4 = np.float32() -i4 = np.int32() -u4 = np.uint32() +m: np.timedelta64 +m_nat: np.timedelta64[None] +m_int0: np.timedelta64[L[0]] +m_int: np.timedelta64[int] +m_td: np.timedelta64[dt.timedelta] -td = np.timedelta64(0, "D") -b_ = np.bool() +b_: np.bool -b = bool() -f = float() -i = int() +b: bool +i: int +f: float AR_b: npt.NDArray[np.bool] AR_m: npt.NDArray[np.timedelta64] # Time structures -assert_type(td % td, np.timedelta64[dt.timedelta]) -assert_type(AR_m % td, npt.NDArray[np.timedelta64]) -assert_type(td % AR_m, npt.NDArray[np.timedelta64]) - -assert_type(divmod(td, td), tuple[np.int64, np.timedelta64]) -assert_type(divmod(AR_m, td), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) -assert_type(divmod(td, AR_m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) +assert_type(m % m, np.timedelta64) +assert_type(m % m_nat, np.timedelta64[None]) +assert_type(m % m_int0, np.timedelta64[None]) +assert_type(m % m_int, np.timedelta64[int | None]) +assert_type(m_nat % m, np.timedelta64[None]) +assert_type(m_int % m_nat, np.timedelta64[None]) +assert_type(m_int % m_int0, np.timedelta64[None]) +assert_type(m_int % m_int, np.timedelta64[int | None]) +assert_type(m_int % m_td, np.timedelta64[int | None]) +assert_type(m_td % m_nat, np.timedelta64[None]) +assert_type(m_td % m_int0, np.timedelta64[None]) +assert_type(m_td % m_int, np.timedelta64[int | None]) +assert_type(m_td % m_td, np.timedelta64[dt.timedelta | None]) + +assert_type(AR_m % m, npt.NDArray[np.timedelta64]) +assert_type(m % AR_m, npt.NDArray[np.timedelta64]) + +assert_type(divmod(m, m), tuple[np.int64, np.timedelta64]) +assert_type(divmod(m, m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m, m_int0), tuple[np.int64, np.timedelta64[None]]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(m.__divmod__(m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(divmod(m_nat, m), tuple[np.int64, 
np.timedelta64[None]]) +assert_type(divmod(m_int, m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_int, m_int0), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_int, m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(divmod(m_int, m_td), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(divmod(m_td, m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_td, m_int0), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_td, m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(divmod(m_td, m_td), tuple[np.int64, np.timedelta64[dt.timedelta | None]]) + +assert_type(divmod(AR_m, m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) +assert_type(divmod(m, AR_m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) # Bool @@ -47,11 +79,12 @@ assert_type(b_ % f8, np.float64) assert_type(b_ % AR_b, npt.NDArray[np.int8]) assert_type(divmod(b_, b), tuple[np.int8, np.int8]) -assert_type(divmod(b_, i), tuple[np.int_, np.int_]) -assert_type(divmod(b_, f), tuple[np.float64, np.float64]) assert_type(divmod(b_, b_), tuple[np.int8, np.int8]) -assert_type(divmod(b_, i8), tuple[np.int64, np.int64]) -assert_type(divmod(b_, u8), tuple[np.uint64, np.uint64]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(b_.__divmod__(i), tuple[np.int_, np.int_]) +assert_type(b_.__divmod__(f), tuple[np.float64, np.float64]) +assert_type(b_.__divmod__(i8), tuple[np.int64, np.int64]) +assert_type(b_.__divmod__(u8), tuple[np.uint64, np.uint64]) assert_type(divmod(b_, f8), tuple[np.float64, np.float64]) assert_type(divmod(b_, AR_b), tuple[npt.NDArray[np.int8], npt.NDArray[np.int8]]) @@ -77,69 +110,72 @@ assert_type(divmod(AR_b, b_), tuple[npt.NDArray[np.int8], npt.NDArray[np.int8]]) assert_type(i8 % b, np.int64) assert_type(i8 % i8, np.int64) -assert_type(i8 % f, np.floating[_64Bit]) -assert_type(i8 % f8, np.floating[_64Bit]) +assert_type(i8 % f, np.float64 | 
np.floating[_64Bit]) +assert_type(i8 % f8, np.float64 | np.floating[_64Bit]) assert_type(i4 % i8, np.int64 | np.int32) assert_type(i4 % f8, np.float64 | np.float32) assert_type(i4 % i4, np.int32) assert_type(i4 % f4, np.float32) -assert_type(i8 % AR_b, npt.NDArray[np.signedinteger[Any]]) - -assert_type(divmod(i8, b), tuple[np.signedinteger[_64Bit], np.signedinteger[_64Bit]]) -assert_type(divmod(i8, f), tuple[np.floating[_64Bit], np.floating[_64Bit]]) -assert_type(divmod(i8, i8), tuple[np.signedinteger[_64Bit], np.signedinteger[_64Bit]]) -assert_type(divmod(i8, f8), tuple[np.floating[_64Bit], np.floating[_64Bit]]) -assert_type(divmod(i8, i4), tuple[np.signedinteger[_64Bit], np.signedinteger[_64Bit]] | tuple[np.signedinteger[_32Bit], np.signedinteger[_32Bit]]) -assert_type(divmod(i8, f4), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.floating[_32Bit], np.floating[_32Bit]]) -assert_type(divmod(i4, i4), tuple[np.signedinteger[_32Bit], np.signedinteger[_32Bit]]) -assert_type(divmod(i4, f4), tuple[np.floating[_32Bit], np.floating[_32Bit]]) -assert_type(divmod(i8, AR_b), tuple[npt.NDArray[np.signedinteger[Any]], npt.NDArray[np.signedinteger[Any]]]) - -assert_type(b % i8, np.signedinteger[_64Bit]) -assert_type(f % i8, np.floating[_64Bit]) +assert_type(i8 % AR_b, npt.NDArray[np.int64]) + +assert_type(divmod(i8, b), tuple[np.int64, np.int64]) +assert_type(divmod(i8, i4), tuple[np.int64, np.int64] | tuple[np.int32, np.int32]) +assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(i8.__divmod__(f), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float64, np.float64]) +assert_type(i8.__divmod__(f8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float64, np.float64]) +assert_type(divmod(i8, f4), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float32, np.float32]) +assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) +assert_type(divmod(i4, f4), 
tuple[np.float32, np.float32]) +assert_type(divmod(i8, AR_b), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) + +assert_type(b % i8, np.int64) +assert_type(f % i8, np.float64 | np.floating[_64Bit]) assert_type(i8 % i8, np.int64) assert_type(f8 % i8, np.float64) assert_type(i8 % i4, np.int64 | np.int32) assert_type(f8 % i4, np.float64) assert_type(i4 % i4, np.int32) assert_type(f4 % i4, np.float32) -assert_type(AR_b % i8, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_b % i8, npt.NDArray[np.int64]) -assert_type(divmod(b, i8), tuple[np.signedinteger[_64Bit], np.signedinteger[_64Bit]]) -assert_type(divmod(f, i8), tuple[np.floating[_64Bit], np.floating[_64Bit]]) +assert_type(divmod(b, i8), tuple[np.int64, np.int64]) +assert_type(divmod(f, i8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float64, np.float64]) assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) assert_type(divmod(f8, i8), tuple[np.float64, np.float64]) -assert_type(divmod(i4, i8), tuple[np.signedinteger[_64Bit], np.signedinteger[_64Bit]] | tuple[np.signedinteger[_32Bit], np.signedinteger[_32Bit]]) -assert_type(divmod(f4, i8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.floating[_32Bit], np.floating[_32Bit]]) -assert_type(divmod(i4, i4), tuple[np.signedinteger[_32Bit], np.signedinteger[_32Bit]]) -assert_type(divmod(f4, i4), tuple[np.floating[_32Bit], np.floating[_32Bit]]) -assert_type(divmod(AR_b, i8), tuple[npt.NDArray[np.signedinteger[Any]], npt.NDArray[np.signedinteger[Any]]]) +assert_type(divmod(i4, i8), tuple[np.int64, np.int64] | tuple[np.int32, np.int32]) +assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(f4.__divmod__(i8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float32, np.float32]) +assert_type(f4.__divmod__(i4), tuple[np.float32, np.float32]) +assert_type(AR_b.__divmod__(i8), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) # float 
assert_type(f8 % b, np.float64) assert_type(f8 % f, np.float64) -assert_type(i8 % f4, np.floating[_64Bit] | np.floating[_32Bit]) +assert_type(i8 % f4, np.floating[_64Bit] | np.float32) assert_type(f4 % f4, np.float32) -assert_type(f8 % AR_b, npt.NDArray[np.floating[Any]]) +assert_type(f8 % AR_b, npt.NDArray[np.float64]) assert_type(divmod(f8, b), tuple[np.float64, np.float64]) assert_type(divmod(f8, f), tuple[np.float64, np.float64]) assert_type(divmod(f8, f8), tuple[np.float64, np.float64]) assert_type(divmod(f8, f4), tuple[np.float64, np.float64]) assert_type(divmod(f4, f4), tuple[np.float32, np.float32]) -assert_type(divmod(f8, AR_b), tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]) +assert_type(divmod(f8, AR_b), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) assert_type(b % f8, np.float64) -assert_type(f % f8, np.float64) +assert_type(f % f8, np.float64) # pyright: ignore[reportAssertTypeFailure] # pyright incorrectly infers `builtins.float` assert_type(f8 % f8, np.float64) assert_type(f8 % f8, np.float64) assert_type(f4 % f4, np.float32) -assert_type(AR_b % f8, npt.NDArray[np.floating[Any]]) +assert_type(AR_b % f8, npt.NDArray[np.float64]) assert_type(divmod(b, f8), tuple[np.float64, np.float64]) -assert_type(divmod(f, f8), tuple[np.float64, np.float64]) assert_type(divmod(f8, f8), tuple[np.float64, np.float64]) -assert_type(divmod(f4, f8), tuple[np.float64, np.float64] | tuple[np.float32, np.float32]) assert_type(divmod(f4, f4), tuple[np.float32, np.float32]) -assert_type(divmod(AR_b, f8), tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(f8.__rdivmod__(f), tuple[np.float64, np.float64]) +assert_type(f8.__rdivmod__(f4), tuple[np.float64, np.float64]) +assert_type(AR_b.__divmod__(f8), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) diff --git a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi 
b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi index 789585ec963b..49181d2c98a6 100644 --- a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi @@ -11,6 +11,7 @@ i4_2d: np.ndarray[tuple[int, int], np.dtype[np.int32]] f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] cG_4d: np.ndarray[tuple[int, int, int, int], np.dtype[np.clongdouble]] i0_nd: npt.NDArray[np.int_] +uncertain_dtype: np.int32 | np.float64 | np.str_ # item assert_type(i0_nd.item(), int) @@ -29,8 +30,15 @@ assert_type(b1_0d.tolist(), bool) assert_type(u2_1d.tolist(), list[int]) assert_type(i4_2d.tolist(), list[list[int]]) assert_type(f8_3d.tolist(), list[list[list[float]]]) -assert_type(cG_4d.tolist(), complex | list[complex] | list[list[complex]] | list[list[list[Any]]]) -assert_type(i0_nd.tolist(), int | list[int] | list[list[int]] | list[list[list[Any]]]) +assert_type(cG_4d.tolist(), Any) +assert_type(i0_nd.tolist(), Any) + +# regression tests for numpy/numpy#27944 +any_dtype: np.ndarray[Any, Any] +any_sctype: np.ndarray[Any, Any] +assert_type(any_dtype.tolist(), Any) +assert_type(any_sctype.tolist(), Any) + # itemset does not return a value # tostring is pretty simple @@ -50,6 +58,13 @@ assert_type(i0_nd.astype(np.float64, "K", "unsafe", True, True), npt.NDArray[np. 
assert_type(np.astype(i0_nd, np.float64), npt.NDArray[np.float64]) +assert_type(i4_2d.astype(np.uint16), np.ndarray[tuple[int, int], np.dtype[np.uint16]]) +assert_type(np.astype(i4_2d, np.uint16), np.ndarray[tuple[int, int], np.dtype[np.uint16]]) +assert_type(f8_3d.astype(np.int16), np.ndarray[tuple[int, int, int], np.dtype[np.int16]]) +assert_type(np.astype(f8_3d, np.int16), np.ndarray[tuple[int, int, int], np.dtype[np.int16]]) +assert_type(i4_2d.astype(uncertain_dtype), np.ndarray[tuple[int, int], np.dtype[np.generic[Any]]]) +assert_type(np.astype(i4_2d, uncertain_dtype), np.ndarray[tuple[int, int], np.dtype[Any]]) + # byteswap assert_type(i0_nd.byteswap(), npt.NDArray[np.int_]) assert_type(i0_nd.byteswap(True), npt.NDArray[np.int_]) diff --git a/numpy/typing/tests/data/reveal/numeric.pyi b/numpy/typing/tests/data/reveal/numeric.pyi index 742ec2a4c827..90e6674a85e3 100644 --- a/numpy/typing/tests/data/reveal/numeric.pyi +++ b/numpy/typing/tests/data/reveal/numeric.pyi @@ -31,7 +31,7 @@ C: SubClass assert_type(np.count_nonzero(i8), int) assert_type(np.count_nonzero(AR_i8), int) assert_type(np.count_nonzero(B), int) -assert_type(np.count_nonzero(AR_i8, keepdims=True), Any) +assert_type(np.count_nonzero(AR_i8, keepdims=True), npt.NDArray[np.intp]) assert_type(np.count_nonzero(AR_i8, axis=0), Any) assert_type(np.isfortran(i8), bool) diff --git a/numpy/typing/tests/data/reveal/random.pyi b/numpy/typing/tests/data/reveal/random.pyi index 03b0712d8c77..4c1c8abd927c 100644 --- a/numpy/typing/tests/data/reveal/random.pyi +++ b/numpy/typing/tests/data/reveal/random.pyi @@ -504,8 +504,8 @@ assert_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, I_int64_100: npt.NDArray[np.int64] = np.array([100], dtype=np.int64) -assert_type(def_gen.integers(0, 100), int) -assert_type(def_gen.integers(100), int) +assert_type(def_gen.integers(0, 100), np.int64) +assert_type(def_gen.integers(100), np.int64) assert_type(def_gen.integers([100]), npt.NDArray[np.int64]) 
assert_type(def_gen.integers(0, [100]), npt.NDArray[np.int64]) diff --git a/numpy/typing/tests/data/reveal/rec.pyi b/numpy/typing/tests/data/reveal/rec.pyi index 13db0a969773..1b88f6b46316 100644 --- a/numpy/typing/tests/data/reveal/rec.pyi +++ b/numpy/typing/tests/data/reveal/rec.pyi @@ -7,7 +7,7 @@ import numpy.typing as npt from typing_extensions import assert_type AR_i8: npt.NDArray[np.int64] -REC_AR_V: np.recarray[Any, np.dtype[np.record]] +REC_AR_V: np.recarray[tuple[int, ...], np.dtype[np.record]] AR_LIST: list[npt.NDArray[np.int64]] record: np.record diff --git a/numpy/typing/tests/data/reveal/strings.pyi b/numpy/typing/tests/data/reveal/strings.pyi index 649902f0c6d3..9339456b61ae 100644 --- a/numpy/typing/tests/data/reveal/strings.pyi +++ b/numpy/typing/tests/data/reveal/strings.pyi @@ -67,27 +67,27 @@ assert_type(np.strings.expandtabs(AR_T), AR_T_alias) assert_type(np.strings.ljust(AR_U, 5), npt.NDArray[np.str_]) assert_type(np.strings.ljust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) assert_type(np.strings.ljust(AR_T, 5), AR_T_alias) -assert_type(np.strings.ljust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_TU_alias) +assert_type(np.strings.ljust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_T_alias) assert_type(np.strings.rjust(AR_U, 5), npt.NDArray[np.str_]) assert_type(np.strings.rjust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) assert_type(np.strings.rjust(AR_T, 5), AR_T_alias) -assert_type(np.strings.rjust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_TU_alias) +assert_type(np.strings.rjust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_T_alias) assert_type(np.strings.lstrip(AR_U), npt.NDArray[np.str_]) assert_type(np.strings.lstrip(AR_S, b"_"), npt.NDArray[np.bytes_]) assert_type(np.strings.lstrip(AR_T), AR_T_alias) -assert_type(np.strings.lstrip(AR_T, "_"), AR_TU_alias) +assert_type(np.strings.lstrip(AR_T, "_"), AR_T_alias) assert_type(np.strings.rstrip(AR_U), npt.NDArray[np.str_]) 
assert_type(np.strings.rstrip(AR_S, b"_"), npt.NDArray[np.bytes_]) assert_type(np.strings.rstrip(AR_T), AR_T_alias) -assert_type(np.strings.rstrip(AR_T, "_"), AR_TU_alias) +assert_type(np.strings.rstrip(AR_T, "_"), AR_T_alias) assert_type(np.strings.strip(AR_U), npt.NDArray[np.str_]) assert_type(np.strings.strip(AR_S, b"_"), npt.NDArray[np.bytes_]) assert_type(np.strings.strip(AR_T), AR_T_alias) -assert_type(np.strings.strip(AR_T, "_"), AR_TU_alias) +assert_type(np.strings.strip(AR_T, "_"), AR_T_alias) assert_type(np.strings.count(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) assert_type(np.strings.count(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) diff --git a/numpy/typing/tests/data/reveal/testing.pyi b/numpy/typing/tests/data/reveal/testing.pyi index 5301090a5f4b..741c71f62a5b 100644 --- a/numpy/typing/tests/data/reveal/testing.pyi +++ b/numpy/typing/tests/data/reveal/testing.pyi @@ -32,15 +32,15 @@ assert_type(np.testing.IgnoreException(), np.testing.IgnoreException) assert_type( np.testing.clear_and_catch_warnings(modules=[np.testing]), - np.testing._private.utils._clear_and_catch_warnings_without_records, + np.testing.clear_and_catch_warnings[None], ) assert_type( np.testing.clear_and_catch_warnings(True), - np.testing._private.utils._clear_and_catch_warnings_with_records, + np.testing.clear_and_catch_warnings[list[warnings.WarningMessage]], ) assert_type( np.testing.clear_and_catch_warnings(False), - np.testing._private.utils._clear_and_catch_warnings_without_records, + np.testing.clear_and_catch_warnings[None], ) assert_type( np.testing.clear_and_catch_warnings(bool_obj), diff --git a/numpy/typing/tests/data/reveal/ufuncs.pyi b/numpy/typing/tests/data/reveal/ufuncs.pyi index fc2345289236..8d3527ac8415 100644 --- a/numpy/typing/tests/data/reveal/ufuncs.pyi +++ b/numpy/typing/tests/data/reveal/ufuncs.pyi @@ -14,6 +14,7 @@ assert_type(np.absolute.__doc__, str) assert_type(np.absolute.types, list[str]) assert_type(np.absolute.__name__, 
Literal["absolute"]) +assert_type(np.absolute.__qualname__, Literal["absolute"]) assert_type(np.absolute.ntypes, Literal[20]) assert_type(np.absolute.identity, None) assert_type(np.absolute.nin, Literal[1]) @@ -26,6 +27,7 @@ assert_type(np.absolute(AR_f8), npt.NDArray[Any]) assert_type(np.absolute.at(AR_f8, AR_i8), None) assert_type(np.add.__name__, Literal["add"]) +assert_type(np.add.__qualname__, Literal["add"]) assert_type(np.add.ntypes, Literal[22]) assert_type(np.add.identity, Literal[0]) assert_type(np.add.nin, Literal[2]) @@ -42,6 +44,7 @@ assert_type(np.add.outer(f8, f8), Any) assert_type(np.add.outer(AR_f8, f8), npt.NDArray[Any]) assert_type(np.frexp.__name__, Literal["frexp"]) +assert_type(np.frexp.__qualname__, Literal["frexp"]) assert_type(np.frexp.ntypes, Literal[4]) assert_type(np.frexp.identity, None) assert_type(np.frexp.nin, Literal[1]) @@ -52,6 +55,7 @@ assert_type(np.frexp(f8), tuple[Any, Any]) assert_type(np.frexp(AR_f8), tuple[npt.NDArray[Any], npt.NDArray[Any]]) assert_type(np.divmod.__name__, Literal["divmod"]) +assert_type(np.divmod.__qualname__, Literal["divmod"]) assert_type(np.divmod.ntypes, Literal[15]) assert_type(np.divmod.identity, None) assert_type(np.divmod.nin, Literal[2]) @@ -62,6 +66,7 @@ assert_type(np.divmod(f8, f8), tuple[Any, Any]) assert_type(np.divmod(AR_f8, f8), tuple[npt.NDArray[Any], npt.NDArray[Any]]) assert_type(np.matmul.__name__, Literal["matmul"]) +assert_type(np.matmul.__qualname__, Literal["matmul"]) assert_type(np.matmul.ntypes, Literal[19]) assert_type(np.matmul.identity, None) assert_type(np.matmul.nin, Literal[2]) @@ -73,6 +78,7 @@ assert_type(np.matmul(AR_f8, AR_f8), Any) assert_type(np.matmul(AR_f8, AR_f8, axes=[(0, 1), (0, 1), (0, 1)]), Any) assert_type(np.vecdot.__name__, Literal["vecdot"]) +assert_type(np.vecdot.__qualname__, Literal["vecdot"]) assert_type(np.vecdot.ntypes, Literal[19]) assert_type(np.vecdot.identity, None) assert_type(np.vecdot.nin, Literal[2]) @@ -82,7 +88,8 @@ 
assert_type(np.vecdot.signature, Literal["(n),(n)->()"]) assert_type(np.vecdot.identity, None) assert_type(np.vecdot(AR_f8, AR_f8), Any) -assert_type(np.bitwise_count.__name__, Literal['bitwise_count']) +assert_type(np.bitwise_count.__name__, Literal["bitwise_count"]) +assert_type(np.bitwise_count.__qualname__, Literal["bitwise_count"]) assert_type(np.bitwise_count.ntypes, Literal[11]) assert_type(np.bitwise_count.identity, None) assert_type(np.bitwise_count.nin, Literal[1]) diff --git a/pavement.py b/pavement.py index e8e63ee89f97..804749df9626 100644 --- a/pavement.py +++ b/pavement.py @@ -36,7 +36,7 @@ #----------------------------------- # Path to the release notes -RELEASE_NOTES = 'doc/source/release/2.2.0-notes.rst' +RELEASE_NOTES = 'doc/source/release/2.2.7-notes.rst' #------------------------------------------------------- diff --git a/pyproject.toml b/pyproject.toml index 73e2021d9e95..2bfedb9c8b20 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.2.0.dev0" +version = "2.2.7" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"} diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 437dbc90a9b7..5a7be719214a 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ spin==0.13 # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.28.0.2 +scipy-openblas32==0.3.29.0.0 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index ab255e648527..adf7d86558f0 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ spin==0.13 # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.28.0.2 -scipy-openblas64==0.3.28.0.2 +scipy-openblas32==0.3.29.0.0 +scipy-openblas64==0.3.29.0.0 diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt index 
74ef448182af..4dcf2a788df0 100644 --- a/requirements/doc_requirements.txt +++ b/requirements/doc_requirements.txt @@ -18,4 +18,4 @@ towncrier toml # for doctests, also needs pytz which is in test_requirements -scipy-doctest +scipy-doctest==1.5.1 diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index dc28402d2cb5..93e441f61310 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -9,12 +9,13 @@ pytest-cov==4.1.0 meson ninja; sys_platform != "emscripten" pytest-xdist +pytest-timeout # for numpy.random.test.test_extending cffi; python_version < '3.10' # For testing types. Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml -mypy==1.13.0; platform_python_implementation != "PyPy" +mypy==1.14.1; platform_python_implementation != "PyPy" typing_extensions>=4.2.0 # for optional f2py encoding detection charset-normalizer diff --git a/tools/changelog.py b/tools/changelog.py index b065cda9f399..4498bb93bd9a 100755 --- a/tools/changelog.py +++ b/tools/changelog.py @@ -135,7 +135,7 @@ def main(token, revision_range): def backtick_repl(matchobj): """repl to add an escaped space following a code block if needed""" if matchobj.group(2) != ' ': - post = r'\ ' + matchobj.group(2) + post = r' ' + matchobj.group(2) else: post = matchobj.group(2) return '``' + matchobj.group(1) + '``' + post diff --git a/tools/ci/cirrus_arm.yml b/tools/ci/cirrus_arm.yml index 46fed5bbf0c4..180770451c44 100644 --- a/tools/ci/cirrus_arm.yml +++ b/tools/ci/cirrus_arm.yml @@ -67,7 +67,7 @@ freebsd_test_task: use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true' compute_engine_instance: image_project: freebsd-org-cloud-dev - image: family/freebsd-14-0 + image: family/freebsd-14-2 platform: freebsd cpu: 1 memory: 4G diff --git a/tools/ci/cirrus_wheels.yml b/tools/ci/cirrus_wheels.yml index 4b06e5776612..aa1063d9f81d 100644 --- a/tools/ci/cirrus_wheels.yml +++ 
b/tools/ci/cirrus_wheels.yml @@ -78,7 +78,7 @@ macosx_arm64_task: build_script: | brew install micromamba gfortran - micromamba shell init -s bash -p ~/micromamba + micromamba shell init -s bash --root-prefix ~/micromamba source ~/.bash_profile micromamba create -n numpydev diff --git a/tools/ci/tsan_suppressions.txt b/tools/ci/tsan_suppressions.txt new file mode 100644 index 000000000000..0745debd8e5f --- /dev/null +++ b/tools/ci/tsan_suppressions.txt @@ -0,0 +1,11 @@ +# This file contains suppressions for the TSAN tool +# +# Reference: https://github.com/google/sanitizers/wiki/ThreadSanitizerSuppressions + +# For np.nonzero, see gh-28361 +race:PyArray_Nonzero +race:count_nonzero_int +race:count_nonzero_bool +race:count_nonzero_float +race:DOUBLE_nonzero + diff --git a/vendored-meson/meson b/vendored-meson/meson index 0d93515fb826..7300f5fd4c1c 160000 --- a/vendored-meson/meson +++ b/vendored-meson/meson @@ -1 +1 @@ -Subproject commit 0d93515fb826440d19707eee47fd92655fe2f166 +Subproject commit 7300f5fd4c1c8b0406faeec4cc631f11f1ea324c