From be0518deac6d4507dc425a256de24c79d773f5b8 Mon Sep 17 00:00:00 2001 From: mayeut Date: Sun, 24 Nov 2024 10:43:41 +0100 Subject: [PATCH 001/187] CI: skip ninja installation in linux_qemu workflows The ninja used in the workflow is the one from the host. Skipping ninja installation in the container allows to workaround issues that could arise when building it from source as is currently the case with riscv64. --- .github/workflows/linux_qemu.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 32d2063bd8ec..ecc26e4a0862 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -141,7 +141,8 @@ jobs: rm -f /usr/bin/ld.bfd && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-ld.bfd /usr/bin/ld.bfd && rm -f /usr/bin/ninja && ln -s /host/usr/bin/ninja /usr/bin/ninja && git config --global --add safe.directory /numpy && - python -m pip install -r /numpy/requirements/build_requirements.txt && + grep -v ninja /numpy/requirements/build_requirements.txt > /tmp/build_requirements.txt && + python -m pip install -r /tmp/build_requirements.txt && python -m pip install pytest pytest-xdist hypothesis typing_extensions && rm -f /usr/local/bin/ninja && mkdir -p /usr/local/bin && ln -s /host/usr/bin/ninja /usr/local/bin/ninja " From c39a2cbe23b651611ba798233ac090e6fefa4f3e Mon Sep 17 00:00:00 2001 From: Matthieu Darbois Date: Sun, 24 Nov 2024 15:56:16 +0100 Subject: [PATCH 002/187] add comment Co-authored-by: Matti Picus --- .github/workflows/linux_qemu.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index ecc26e4a0862..4ef74bcfa7f8 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -141,6 +141,7 @@ jobs: rm -f /usr/bin/ld.bfd && ln -s /host/usr/bin/${TOOLCHAIN_NAME}-ld.bfd /usr/bin/ld.bfd && rm -f /usr/bin/ninja && ln -s /host/usr/bin/ninja /usr/bin/ninja && git 
config --global --add safe.directory /numpy && + # No need to build ninja from source, the host ninja is used for the build grep -v ninja /numpy/requirements/build_requirements.txt > /tmp/build_requirements.txt && python -m pip install -r /tmp/build_requirements.txt && python -m pip install pytest pytest-xdist hypothesis typing_extensions && From 54b241af63bb72be853df94ed9bd90d1c9338ca5 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Sun, 24 Nov 2024 20:03:44 +0200 Subject: [PATCH 003/187] CI: update circleci to python3.11.10, limit parallel builds. (#27826) * CI: update circleci to python3.12.7, ubuntu 20.04.3 [skip actions][skip azp] * limit to 2 parallel build jobs * using python3.12 fails to build numpy.distutils, use 3.11 instead * typo --- .circleci/config.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index e6ec8cc783bd..8c2b443f1e84 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,7 +9,7 @@ _defaults: &defaults docker: # CircleCI maintains a library of pre-built images # documented at https://circleci.com/developer/images/image/cimg/python - - image: cimg/python:3.11.8 + - image: cimg/python:3.11.10 working_directory: ~/repo @@ -60,7 +60,7 @@ jobs: # get newer, pre-release versions of critical packages pip install --progress-bar=off --pre -r requirements/doc_requirements.txt # then install numpy HEAD, which will override the version installed above - spin build --with-scipy-openblas=64 + spin build --with-scipy-openblas=64 -j 2 - run: name: build devdocs w/ref warnings @@ -97,8 +97,8 @@ jobs: # - validates ReST blocks (via validate_rst_syntax) # - checks that all of a module's `__all__` is reflected in the # module-level docstring autosummary - echo calling python tools/refguide_check.py -v - python tools/refguide_check.py -v + echo calling python3 tools/refguide_check.py -v + python3 tools/refguide_check.py -v - persist_to_workspace: root: ~/repo From 
064b6f83b81a0701ca90ef1b56a5338f969b6ef5 Mon Sep 17 00:00:00 2001 From: Nitish Satyavolu Date: Sat, 23 Nov 2024 18:00:05 -0800 Subject: [PATCH 004/187] BUG: Fix mismatch in definition and declaration for a couple functions --- numpy/_core/src/common/numpyos.c | 2 +- numpy/_core/src/multiarray/compiled_base.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/common/numpyos.c b/numpy/_core/src/common/numpyos.c index a5ca28081d52..5bb250e22625 100644 --- a/numpy/_core/src/common/numpyos.c +++ b/numpy/_core/src/common/numpyos.c @@ -416,7 +416,7 @@ NumPyOS_ascii_isupper(char c) * Same as tolower under C locale */ NPY_NO_EXPORT int -NumPyOS_ascii_tolower(int c) +NumPyOS_ascii_tolower(char c) { if (c >= 'A' && c <= 'Z') { return c + ('a'-'A'); diff --git a/numpy/_core/src/multiarray/compiled_base.h b/numpy/_core/src/multiarray/compiled_base.h index e0e73ac798bf..b8081c8d3a55 100644 --- a/numpy/_core/src/multiarray/compiled_base.h +++ b/numpy/_core/src/multiarray/compiled_base.h @@ -10,9 +10,9 @@ arr_bincount(PyObject *, PyObject *const *, Py_ssize_t, PyObject *); NPY_NO_EXPORT PyObject * arr__monotonicity(PyObject *, PyObject *, PyObject *kwds); NPY_NO_EXPORT PyObject * -arr_interp(PyObject *, PyObject *const *, Py_ssize_t, PyObject *, PyObject *); +arr_interp(PyObject *, PyObject *const *, Py_ssize_t, PyObject *); NPY_NO_EXPORT PyObject * -arr_interp_complex(PyObject *, PyObject *const *, Py_ssize_t, PyObject *, PyObject *); +arr_interp_complex(PyObject *, PyObject *const *, Py_ssize_t, PyObject *); NPY_NO_EXPORT PyObject * arr_ravel_multi_index(PyObject *, PyObject *, PyObject *); NPY_NO_EXPORT PyObject * From 705f00a1994e45880fc03a66c861308dfa97b067 Mon Sep 17 00:00:00 2001 From: Nitish Satyavolu Date: Sat, 23 Nov 2024 22:43:18 -0800 Subject: [PATCH 005/187] BUG: Fix declaration of NumPyOS_ascii_tolower to make it consistent with C standard library tolower --- numpy/_core/src/common/numpyos.c | 2 +- numpy/_core/src/common/numpyos.h 
| 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/common/numpyos.c b/numpy/_core/src/common/numpyos.c index 5bb250e22625..a5ca28081d52 100644 --- a/numpy/_core/src/common/numpyos.c +++ b/numpy/_core/src/common/numpyos.c @@ -416,7 +416,7 @@ NumPyOS_ascii_isupper(char c) * Same as tolower under C locale */ NPY_NO_EXPORT int -NumPyOS_ascii_tolower(char c) +NumPyOS_ascii_tolower(int c) { if (c >= 'A' && c <= 'Z') { return c + ('a'-'A'); diff --git a/numpy/_core/src/common/numpyos.h b/numpy/_core/src/common/numpyos.h index fac82f7d438c..8fbecb122577 100644 --- a/numpy/_core/src/common/numpyos.h +++ b/numpy/_core/src/common/numpyos.h @@ -51,7 +51,7 @@ NPY_NO_EXPORT int NumPyOS_ascii_isupper(char c); NPY_NO_EXPORT int -NumPyOS_ascii_tolower(char c); +NumPyOS_ascii_tolower(int c); /* Convert a string to an int in an arbitrary base */ NPY_NO_EXPORT npy_longlong From a53df16623206b5fef940d5ec8a0ba68267b3298 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Mon, 25 Nov 2024 16:35:32 +0200 Subject: [PATCH 006/187] DOC: Correct version-added for mean arg for nanvar and nanstd [skip cirrus] [skip azp] [skip actions] --- numpy/lib/_nanfunctions_impl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_nanfunctions_impl.py b/numpy/lib/_nanfunctions_impl.py index cc90523f15cd..9d0173dbe340 100644 --- a/numpy/lib/_nanfunctions_impl.py +++ b/numpy/lib/_nanfunctions_impl.py @@ -1761,7 +1761,7 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, The axis for the calculation of the mean should be the same as used in the call to this var function. - .. versionadded:: 1.26.0 + .. versionadded:: 2.0.0 correction : {int, float}, optional Array API compatible name for the ``ddof`` parameter. 
Only one of them @@ -1958,7 +1958,7 @@ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, The axis for the calculation of the mean should be the same as used in the call to this std function. - .. versionadded:: 1.26.0 + .. versionadded:: 2.0.0 correction : {int, float}, optional Array API compatible name for the ``ddof`` parameter. Only one of them From de865dfb8c722fc84eb7acf7c6084a7cc833ca33 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Mon, 25 Nov 2024 16:33:45 +0100 Subject: [PATCH 007/187] BUG: Never negate strides in reductions (for now) Straight forward fix (and trying to consolidate tests... The annoying part is that stride-reordering only kicks in when `out=` is passed. ---- Notes on strides negating: That is partially understandable, because a new array cannot have negative strides (`free(arr.data)` needs to work) and maybe we don't want to drag around negative strides indefinitely (if we did, maybe `arr.dealloc()` should deal with it.) It also makes the whole mechanism a bit a confusing, right now... * For reduction axes it clearly doesn't matter if we allocate or not. * It is rare that an allocator doesn't allocate any array... so the whole mechanism seems currently rather useless... --- numpy/_core/src/umath/reduction.c | 9 ++- numpy/_core/tests/test_ufunc.py | 101 +++++++++++------------------- 2 files changed, 42 insertions(+), 68 deletions(-) diff --git a/numpy/_core/src/umath/reduction.c b/numpy/_core/src/umath/reduction.c index 548530e1ca3b..1d3937eee1eb 100644 --- a/numpy/_core/src/umath/reduction.c +++ b/numpy/_core/src/umath/reduction.c @@ -218,10 +218,13 @@ PyUFunc_ReduceWrapper(PyArrayMethod_Context *context, NPY_ITER_ZEROSIZE_OK | NPY_ITER_REFS_OK | NPY_ITER_DELAY_BUFALLOC | + /* + * stride negation (if reorderable) could currently misalign the + * first-visit and initial value copy logic. 
+ */ + NPY_ITER_DONT_NEGATE_STRIDES | NPY_ITER_COPY_IF_OVERLAP; - if (!(context->method->flags & NPY_METH_IS_REORDERABLE)) { - it_flags |= NPY_ITER_DONT_NEGATE_STRIDES; - } + op_flags[0] = NPY_ITER_READWRITE | NPY_ITER_ALIGNED | NPY_ITER_ALLOCATE | diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 43037f20e2f6..e3f3f012963b 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -1644,51 +1644,46 @@ def test_where_with_broadcasting(self): assert_array_equal((a[where] < b_where), out[where].astype(bool)) assert not out[~where].any() # outside mask, out remains all 0 - def check_identityless_reduction(self, a): - # np.minimum.reduce is an identityless reduction + @staticmethod + def identityless_reduce_arrs(): + yield np.empty((2, 3, 4), order='C') + yield np.empty((2, 3, 4), order='F') + # Mixed order (reduce order differs outer) + yield np.empty((2, 4, 3), order='C').swapaxes(1, 2) + # Reversed order + yield np.empty((2, 3, 4), order='C')[::-1, ::-1, ::-1] + # Not contiguous + yield np.empty((3, 5, 4), order='C').swapaxes(1, 2)[1:, 1:, 1:] + # Not contiguous and not aligned + a = np.empty((3*4*5*8 + 1,), dtype='i1') + a = a[1:].view(dtype='f8') + a.shape = (3, 4, 5) + a = a[1:, 1:, 1:] + yield a - # Verify that it sees the zero at various positions + @pytest.mark.parametrize("a", identityless_reduce_arrs()) + @pytest.mark.parametrize("pos", [(1, 0, 0), (0, 1, 0), (0, 0, 1)]) + def test_identityless_reduction(self, a, pos): + # np.minimum.reduce is an identityless reduction a[...] 
= 1 - a[1, 0, 0] = 0 - assert_equal(np.minimum.reduce(a, axis=None), 0) - assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(1, 2)), [1, 0]) - assert_equal(np.minimum.reduce(a, axis=0), - [[0, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=1), - [[1, 1, 1, 1], [0, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=2), - [[1, 1, 1], [0, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=()), a) + a[pos] = 0 - a[...] = 1 - a[0, 1, 0] = 0 - assert_equal(np.minimum.reduce(a, axis=None), 0) - assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(0, 2)), [1, 0, 1]) - assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1]) - assert_equal(np.minimum.reduce(a, axis=0), - [[1, 1, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=1), - [[0, 1, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=2), - [[1, 0, 1], [1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=()), a) + for axis in [None, (0, 1), (0, 2), (1, 2), 0, 1, 2, ()]: + if axis is None: + axes = np.array([], dtype=np.intp) + else: + axes = np.delete(np.arange(a.ndim), axis) - a[...] 
= 1 - a[0, 0, 1] = 0 - assert_equal(np.minimum.reduce(a, axis=None), 0) - assert_equal(np.minimum.reduce(a, axis=(0, 1)), [1, 0, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1]) - assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1]) - assert_equal(np.minimum.reduce(a, axis=0), - [[1, 0, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=1), - [[1, 0, 1, 1], [1, 1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=2), - [[0, 1, 1], [1, 1, 1]]) - assert_equal(np.minimum.reduce(a, axis=()), a) + expected_pos = tuple(np.array(pos)[axes]) + expected = np.ones(np.array(a.shape)[axes]) + expected[expected_pos] = 0 + + res = np.minimum.reduce(a, axis=axis) + assert_equal(res, expected, strict=True) + + res = np.full_like(res, np.nan) + np.minimum.reduce(a, axis=axis, out=res) + assert_equal(res, expected, strict=True) @requires_memory(6 * 1024**3) @pytest.mark.skipif(sys.maxsize < 2**32, @@ -1703,30 +1698,6 @@ def test_identityless_reduction_huge_array(self): assert res[0] == 3 assert res[-1] == 4 - def test_identityless_reduction_corder(self): - a = np.empty((2, 3, 4), order='C') - self.check_identityless_reduction(a) - - def test_identityless_reduction_forder(self): - a = np.empty((2, 3, 4), order='F') - self.check_identityless_reduction(a) - - def test_identityless_reduction_otherorder(self): - a = np.empty((2, 4, 3), order='C').swapaxes(1, 2) - self.check_identityless_reduction(a) - - def test_identityless_reduction_noncontig(self): - a = np.empty((3, 5, 4), order='C').swapaxes(1, 2) - a = a[1:, 1:, 1:] - self.check_identityless_reduction(a) - - def test_identityless_reduction_noncontig_unaligned(self): - a = np.empty((3*4*5*8 + 1,), dtype='i1') - a = a[1:].view(dtype='f8') - a.shape = (3, 4, 5) - a = a[1:, 1:, 1:] - self.check_identityless_reduction(a) - def test_reduce_identity_depends_on_loop(self): """ The type of the result should always depend on the selected loop, not From 391e5def375db6f0d1098fedf410f69cb7b8f5f6 
Mon Sep 17 00:00:00 2001 From: Marten van Kerkwijk Date: Mon, 22 Jan 2024 17:17:18 -0500 Subject: [PATCH 008/187] ENH: define matvec and vecmat gufuncs Internally, they mostly just call the relevant blas, or vecdot routines. --- benchmarks/benchmarks/bench_ufunc.py | 6 +- .../upcoming_changes/25675.new_feature.rst | 20 ++ doc/source/reference/routines.linalg.rst | 2 + numpy/__init__.py | 12 +- numpy/__init__.pyi | 2 + numpy/_core/code_generators/generate_umath.py | 16 ++ .../_core/code_generators/ufunc_docstrings.py | 146 +++++++++++++- numpy/_core/multiarray.py | 10 +- numpy/_core/src/umath/matmul.c.src | 186 +++++++++++++++++- numpy/_core/src/umath/matmul.h.src | 13 +- numpy/_core/tests/test_ufunc.py | 86 ++++++-- numpy/_core/umath.py | 4 +- 12 files changed, 449 insertions(+), 54 deletions(-) create mode 100644 doc/release/upcoming_changes/25675.new_feature.rst diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py index 42d32a3ce3b5..895c8e931590 100644 --- a/benchmarks/benchmarks/bench_ufunc.py +++ b/benchmarks/benchmarks/bench_ufunc.py @@ -16,12 +16,12 @@ 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', 'left_shift', 'less', 'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', 'logical_or', - 'logical_xor', 'matmul', 'maximum', 'minimum', 'mod', 'modf', - 'multiply', 'negative', 'nextafter', 'not_equal', 'positive', + 'logical_xor', 'matmul', 'matvec', 'maximum', 'minimum', 'mod', + 'modf', 'multiply', 'negative', 'nextafter', 'not_equal', 'positive', 'power', 'rad2deg', 'radians', 'reciprocal', 'remainder', 'right_shift', 'rint', 'sign', 'signbit', 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan', 'tanh', - 'true_divide', 'trunc', 'vecdot'] + 'true_divide', 'trunc', 'vecdot', 'vecmat'] arrayfuncdisp = ['real', 'round'] for name in ufuncs: diff --git a/doc/release/upcoming_changes/25675.new_feature.rst b/doc/release/upcoming_changes/25675.new_feature.rst new file mode 
100644 index 000000000000..f048e1f1020a --- /dev/null +++ b/doc/release/upcoming_changes/25675.new_feature.rst @@ -0,0 +1,20 @@ +New functions for matrix-vector and vector-matrix products +---------------------------------------------------------- + +Two new generalized ufuncs were defined: + +* `numpy.matvec` - matrix-vector product, treating the arguments as + stacks of matrices and column vectors, respectively. + +* `numpy.vecmat` - vector-matrix product, treating the arguments as + stacks of column vectors and matrices, respectively. For complex + vectors, the conjugate is taken. + +These add to the existing `numpy.matmul` as well as to `numpy.vecdot`, +which was added in numpy 2.0. + +Note that `numpy.matmul` never takes a complex conjugate, also not +when its left input is a vector, while both `numpy.vecdot` and +`numpy.vecmat` do take the conjugate for complex vectors on the +left-hand side (which are taken to be the ones that are transposed, +following the physics convention). diff --git a/doc/source/reference/routines.linalg.rst b/doc/source/reference/routines.linalg.rst index 49c1ea7bce7a..d4fd7f9e0677 100644 --- a/doc/source/reference/routines.linalg.rst +++ b/doc/source/reference/routines.linalg.rst @@ -62,6 +62,8 @@ Matrix and vector products outer matmul linalg.matmul (Array API compatible location) + matvec + vecmat tensordot linalg.tensordot (Array API compatible location) einsum diff --git a/numpy/__init__.py b/numpy/__init__.py index 13c899384842..411db19fc98b 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -151,10 +151,10 @@ left_shift, less, less_equal, lexsort, linspace, little_endian, log, log10, log1p, log2, logaddexp, logaddexp2, logical_and, logical_not, logical_or, logical_xor, logspace, long, longdouble, longlong, matmul, - matrix_transpose, max, maximum, may_share_memory, mean, memmap, min, - min_scalar_type, minimum, mod, modf, moveaxis, multiply, nan, ndarray, - ndim, nditer, negative, nested_iters, newaxis, nextafter, 
nonzero, - not_equal, number, object_, ones, ones_like, outer, partition, + matvec, matrix_transpose, max, maximum, may_share_memory, mean, memmap, + min, min_scalar_type, minimum, mod, modf, moveaxis, multiply, nan, + ndarray, ndim, nditer, negative, nested_iters, newaxis, nextafter, + nonzero, not_equal, number, object_, ones, ones_like, outer, partition, permute_dims, pi, positive, pow, power, printoptions, prod, promote_types, ptp, put, putmask, rad2deg, radians, ravel, recarray, reciprocal, record, remainder, repeat, require, reshape, resize, @@ -165,8 +165,8 @@ str_, subtract, sum, swapaxes, take, tan, tanh, tensordot, timedelta64, trace, transpose, true_divide, trunc, typecodes, ubyte, ufunc, uint, uint16, uint32, uint64, uint8, uintc, uintp, ulong, - ulonglong, unsignedinteger, unstack, ushort, var, vdot, vecdot, void, - vstack, where, zeros, zeros_like + ulonglong, unsignedinteger, unstack, ushort, var, vdot, vecdot, + vecmat, void, vstack, where, zeros, zeros_like ) # NOTE: It's still under discussion whether these aliases diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 18dbf22e98ad..9d3492eac003 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4490,6 +4490,7 @@ logical_not: _UFunc_Nin1_Nout1[L['logical_not'], L[20], None] logical_or: _UFunc_Nin2_Nout1[L['logical_or'], L[20], L[False]] logical_xor: _UFunc_Nin2_Nout1[L['logical_xor'], L[19], L[False]] matmul: _GUFunc_Nin2_Nout1[L['matmul'], L[19], None, L["(n?,k),(k,m?)->(n?,m?)"]] +matvec: _GUFunc_Nin2_Nout1[L['matvec'], L[19], None, L["(m,n),(n)->(m)"]] maximum: _UFunc_Nin2_Nout1[L['maximum'], L[21], None] minimum: _UFunc_Nin2_Nout1[L['minimum'], L[21], None] mod: _UFunc_Nin2_Nout1[L['remainder'], L[16], None] @@ -4519,6 +4520,7 @@ tanh: _UFunc_Nin1_Nout1[L['tanh'], L[8], None] true_divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] trunc: _UFunc_Nin1_Nout1[L['trunc'], L[7], None] vecdot: _GUFunc_Nin2_Nout1[L['vecdot'], L[19], None, L["(n),(n)->()"]] +vecmat: 
_GUFunc_Nin2_Nout1[L['vecmat'], L[19], None, L["(n),(n,m)->(m)"]] abs = absolute acos = arccos diff --git a/numpy/_core/code_generators/generate_umath.py b/numpy/_core/code_generators/generate_umath.py index e5e7d1b76523..1ff1fd019936 100644 --- a/numpy/_core/code_generators/generate_umath.py +++ b/numpy/_core/code_generators/generate_umath.py @@ -1152,6 +1152,22 @@ def english_upper(s): TD(O), signature='(n),(n)->()', ), +'matvec': + Ufunc(2, 1, None, + docstrings.get('numpy._core.umath.matvec'), + "PyUFunc_SimpleUniformOperationTypeResolver", + TD(notimes_or_obj), + TD(O), + signature='(m,n),(n)->(m)', + ), +'vecmat': + Ufunc(2, 1, None, + docstrings.get('numpy._core.umath.vecmat'), + "PyUFunc_SimpleUniformOperationTypeResolver", + TD(notimes_or_obj), + TD(O), + signature='(n),(n,m)->(m)', + ), 'str_len': Ufunc(1, 1, Zero, docstrings.get('numpy._core.umath.str_len'), diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index f17a1221b371..b157eb0683c6 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -44,7 +44,7 @@ def add_newdoc(place, name, doc): skip = ( # gufuncs do not use the OUT_SCALAR replacement strings - 'matmul', 'vecdot', + 'matmul', 'vecdot', 'matvec', 'vecmat', # clip has 3 inputs, which is not handled by this 'clip', ) @@ -2793,7 +2793,9 @@ def add_newdoc(place, name, doc): See Also -------- - vdot : Complex-conjugating dot product. + vecdot : Complex-conjugating dot product for stacks of vectors. + matvec : Matrix-vector product for stacks of matrices and vectors. + vecmat : Vector-matrix product for stacks of vectors and matrices. tensordot : Sum products over arbitrary axes. einsum : Einstein summation convention. dot : alternative matrix product with different broadcasting rules. @@ -2808,10 +2810,10 @@ def add_newdoc(place, name, doc): matrices residing in the last two indexes and broadcast accordingly. 
- If the first argument is 1-D, it is promoted to a matrix by prepending a 1 to its dimensions. After matrix multiplication - the prepended 1 is removed. + the prepended 1 is removed. (For stacks of vectors, use ``vecmat``.) - If the second argument is 1-D, it is promoted to a matrix by appending a 1 to its dimensions. After matrix multiplication - the appended 1 is removed. + the appended 1 is removed. (For stacks of vectors, use ``matvec``.) ``matmul`` differs from ``dot`` in two important ways: @@ -2910,8 +2912,8 @@ def add_newdoc(place, name, doc): Input arrays, scalars not allowed. out : ndarray, optional A location into which the result is stored. If provided, it must have - a shape that the broadcasted shape of `x1` and `x2` with the last axis - removed. If not provided or None, a freshly-allocated array is used. + the broadcasted shape of `x1` and `x2` with the last axis removed. + If not provided or None, a freshly-allocated array is used. **kwargs For other keyword-only arguments, see the :ref:`ufunc docs `. @@ -2933,6 +2935,9 @@ def add_newdoc(place, name, doc): See Also -------- vdot : same but flattens arguments first + matmul : Matrix-matrix product. + vecmat : Vector-matrix product. + matvec : Matrix-vector product. einsum : Einstein summation convention. Examples @@ -2949,6 +2954,135 @@ def add_newdoc(place, name, doc): .. versionadded:: 2.0.0 """) +add_newdoc('numpy._core.umath', 'matvec', + """ + Matrix-vector dot product of two arrays. + + Given a matrix (or stack of matrices) :math:`\\mathbf{A}` in ``x1`` and + a vector (or stack of vectors) :math:`\\mathbf{v}` in ``x2``, the + matrix-vector product is defined as: + + .. math:: + \\mathbf{A} \\cdot \\mathbf{b} = \\sum_{j=0}^{n-1} A_{ij} v_j + + where the sum is over the last dimensions in ``x1`` and ``x2`` + (unless ``axes`` is specified). (For a matrix-vector product with the + vector conjugated, use ``np.vecmat(x2, x1.mT)``.) 
+ + Parameters + ---------- + x1, x2 : array_like + Input arrays, scalars not allowed. + out : ndarray, optional + A location into which the result is stored. If provided, it must have + the broadcasted shape of ``x1`` and ``x2`` with the summation axis + removed. If not provided or None, a freshly-allocated array is used. + **kwargs + For other keyword-only arguments, see the + :ref:`ufunc docs `. + + Returns + ------- + y : ndarray + The matrix-vector product of the inputs. + + Raises + ------ + ValueError + If the last dimensions of ``x1`` and ``x2`` are not the same size. + + If a scalar value is passed in. + + See Also + -------- + vecdot : Vector-vector product. + vecmat : Vector-matrix product. + matmul : Matrix-matrix product. + einsum : Einstein summation convention. + + Examples + -------- + Rotate a set of vectors from Y to X along Z. + + >>> a = np.array([[0., 1., 0.], + ... [-1., 0., 0.], + ... [0., 0., 1.]]) + >>> v = np.array([[1., 0., 0.], + ... [0., 1., 0.], + ... [0., 0., 1.], + ... [0., 6., 8.]]) + >>> np.matvec(a, v) + array([[ 0., -1., 0.], + [ 1., 0., 0.], + [ 0., 0., 1.], + [ 6., 0., 8.]]) + + .. versionadded:: 2.1.0 + """) + +add_newdoc('numpy._core.umath', 'vecmat', + """ + Vector-matrix dot product of two arrays. + + Given a vector (or stack of vector) :math:`\\mathbf{v}` in ``x1`` and + a matrix (or stack of matrices) :math:`\\mathbf{A}` in ``x2``, the + vector-matrix product is defined as: + + .. math:: + \\mathbf{b} \\cdot \\mathbf{A} = \\sum_{i=0}^{n-1} \\overline{v_i}A_{ij} + + where the sum is over the last dimension of ``x1`` and the one-but-last + dimensions in ``x2`` (unless `axes` is specified) and where + :math:`\\overline{v_i}` denotes the complex conjugate if :math:`v` + is complex and the identity otherwise. (For a non-conjugated vector-matrix + product, use ``np.matvec(x2.mT, x1)``.) + + Parameters + ---------- + x1, x2 : array_like + Input arrays, scalars not allowed. 
+ out : ndarray, optional + A location into which the result is stored. If provided, it must have + the broadcasted shape of ``x1`` and ``x2`` with the summation axis + removed. If not provided or None, a freshly-allocated array is used. + **kwargs + For other keyword-only arguments, see the + :ref:`ufunc docs `. + + Returns + ------- + y : ndarray + The vector-matrix product of the inputs. + + Raises + ------ + ValueError + If the last dimensions of ``x1`` and the one-but-last dimension of + ``x2`` are not the same size. + + If a scalar value is passed in. + + See Also + -------- + vecdot : Vector-vector product. + matvec : Matrix-vector product. + matmul : Matrix-matrix product. + einsum : Einstein summation convention. + + Examples + -------- + Project a vector along X and Y. + + >>> v = np.array([0., 4., 2.]) + >>> a = np.array([[1., 0., 0.], + ... [0., 1., 0.], + ... [0., 0., 0.]]) + >>> np.vecmat(v, a) + array([ 0., 4., 0.]) + + .. versionadded:: 2.1.0 + """) + add_newdoc('numpy._core.umath', 'modf', """ Return the fractional and integral parts of an array, element-wise. 
diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index 449c3d2b4791..b50f319ecf0b 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -83,11 +83,11 @@ def _override___module__(): 'isfinite', 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', 'less', 'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', 'logical_or', - 'logical_xor', 'matmul', 'maximum', 'minimum', 'remainder', 'modf', - 'multiply', 'negative', 'nextafter', 'not_equal', 'positive', 'power', - 'rad2deg', 'radians', 'reciprocal', 'rint', 'sign', 'signbit', 'sin', - 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan', 'tanh', - 'trunc', 'vecdot', + 'logical_xor', 'matmul', 'matvec', 'maximum', 'minimum', 'remainder', + 'modf', 'multiply', 'negative', 'nextafter', 'not_equal', 'positive', + 'power', 'rad2deg', 'radians', 'reciprocal', 'rint', 'sign', 'signbit', + 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan', 'tanh', + 'trunc', 'vecdot', 'vecmat', ]: ufunc = namespace_names[ufunc_name] ufunc.__module__ = "numpy" diff --git a/numpy/_core/src/umath/matmul.c.src b/numpy/_core/src/umath/matmul.c.src index 37f990f970ed..f0f8b2f4153f 100644 --- a/numpy/_core/src/umath/matmul.c.src +++ b/numpy/_core/src/umath/matmul.c.src @@ -81,9 +81,9 @@ static const npy_cfloat oneF = 1.0f, zeroF = 0.0f; */ NPY_NO_EXPORT void @name@_gemv(void *ip1, npy_intp is1_m, npy_intp is1_n, - void *ip2, npy_intp is2_n, npy_intp NPY_UNUSED(is2_p), - void *op, npy_intp op_m, npy_intp NPY_UNUSED(op_p), - npy_intp m, npy_intp n, npy_intp NPY_UNUSED(p)) + void *ip2, npy_intp is2_n, + void *op, npy_intp op_m, + npy_intp m, npy_intp n) { /* * Vector matrix multiplication -- Level 2 BLAS @@ -465,13 +465,12 @@ NPY_NO_EXPORT void op, os_m, os_p, dm, dn, dp); } else if (vector_matrix) { /* vector @ matrix, switch ip1, ip2, p and m */ - @TYPE@_gemv(ip2, is2_p, is2_n, ip1, is1_n, is1_m, - op, os_p, os_m, dp, dn, dm); + @TYPE@_gemv(ip2, is2_p, is2_n, 
ip1, is1_n, + op, os_p, dp, dn); } else if (matrix_vector) { /* matrix @ vector */ - @TYPE@_gemv(ip1, is1_m, is1_n, ip2, is2_n, is2_p, - - op, os_m, os_p, dm, dn, dp); + @TYPE@_gemv(ip1, is1_m, is1_n, ip2, is2_n, + op, os_m, dm, dn); } else { /* column @ row, 2d output, no blas needed or non-blas-able input */ @TYPE@_matmul_inner_noblas(ip1, is1_m, is1_n, @@ -655,3 +654,174 @@ NPY_NO_EXPORT void } } /**end repeat**/ + +#if defined(HAVE_CBLAS) +/* + * Blas complex vector-matrix product via gemm (gemv cannot conjugate the vector). + */ +/**begin repeat + * + * #name = CFLOAT, CDOUBLE# + * #typ = npy_cfloat, npy_cdouble# + * #prefix = c, z# + * #step1 = &oneF, &oneD# + * #step0 = &zeroF, &zeroD# + */ +NPY_NO_EXPORT void +@name@_vecmat_via_gemm(void *ip1, npy_intp is1_n, + void *ip2, npy_intp is2_n, npy_intp is2_m, + void *op, npy_intp os_m, + npy_intp n, npy_intp m) +{ + enum CBLAS_ORDER order = CblasRowMajor; + enum CBLAS_TRANSPOSE trans1, trans2; + CBLAS_INT N, M, lda, ldb, ldc; + assert(n <= BLAS_MAXSIZE && m <= BLAS_MAXSIZE); + N = (CBLAS_INT)n; + M = (CBLAS_INT)m; + + assert(os_m == sizeof(@typ@)); + ldc = (CBLAS_INT)m; + + assert(is_blasable2d(is1_n, sizeof(@typ@), n, 1, sizeof(@typ@))); + trans1 = CblasConjTrans; + lda = (CBLAS_INT)(is1_n / sizeof(@typ@)); + + if (is_blasable2d(is2_n, is2_m, n, m, sizeof(@typ@))) { + trans2 = CblasNoTrans; + ldb = (CBLAS_INT)(is2_n / sizeof(@typ@)); + } + else { + assert(is_blasable2d(is2_m, is2_n, m, n, sizeof(@typ@))); + trans2 = CblasTrans; + ldb = (CBLAS_INT)(is2_m / sizeof(@typ@)); + } + CBLAS_FUNC(cblas_@prefix@gemm)( + order, trans1, trans2, 1, M, N, @step1@, ip1, lda, + ip2, ldb, @step0@, op, ldc); +} +/**end repeat**/ +#endif + +/* + * matvec loops, using blas gemv if possible, and TYPE_dot implementations otherwise. 
+ * signature is (m,n),(n)->(m) + */ +/**begin repeat + * #TYPE = FLOAT, DOUBLE, LONGDOUBLE, HALF, + * CFLOAT, CDOUBLE, CLONGDOUBLE, + * UBYTE, USHORT, UINT, ULONG, ULONGLONG, + * BYTE, SHORT, INT, LONG, LONGLONG, + * BOOL, OBJECT# + * #typ = npy_float,npy_double,npy_longdouble, npy_half, + * npy_cfloat, npy_cdouble, npy_clongdouble, + * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong, + * npy_byte, npy_short, npy_int, npy_long, npy_longlong, + * npy_bool, npy_object# + * #USEBLAS = 1, 1, 0, 0, 1, 1, 0*13# + * #CHECK_PYERR = 0*18, 1# + */ +NPY_NO_EXPORT void +@TYPE@_matvec(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) +{ + npy_intp n_outer = dimensions[0]; + npy_intp s0=steps[0], s1=steps[1], s2=steps[2]; + npy_intp dm = dimensions[1], dn = dimensions[2]; + npy_intp is1_m=steps[3], is1_n=steps[4], is2_n=steps[5], os_m=steps[6]; +#if @USEBLAS@ && defined(HAVE_CBLAS) + npy_bool too_big_for_blas = (dm > BLAS_MAXSIZE || dn > BLAS_MAXSIZE); + npy_bool i1_c_blasable = is_blasable2d(is1_m, is1_n, dm, dn, sizeof(@typ@)); + npy_bool i1_f_blasable = is_blasable2d(is1_n, is1_m, dn, dm, sizeof(@typ@)); + npy_bool i2_blasable = is_blasable2d(is2_n, sizeof(@typ@), dn, 1, sizeof(@typ@)); + npy_bool blasable = ((i1_c_blasable || i1_f_blasable) && i2_blasable + && !too_big_for_blas && dn > 1 && dm > 1); +#endif + for (npy_intp i = 0; i < n_outer; i++, + args[0] += s0, args[1] += s1, args[2] += s2) { + char *ip1=args[0], *ip2=args[1], *op=args[2]; +#if @USEBLAS@ && defined(HAVE_CBLAS) + if (blasable) { + @TYPE@_gemv(ip1, is1_m, is1_n, ip2, is2_n, op, os_m, dm, dn); + continue; + } +#endif + /* + * Dot the different matrix rows with the vector to get output elements. 
+ * (no conjugation for complex, unlike vecdot and vecmat) + */ + for (npy_intp j = 0; j < dm; j++, ip1 += is1_m, op += os_m) { + @TYPE@_dot(ip1, is1_n, ip2, is2_n, op, dn, NULL); +#if @CHECK_PYERR@ + if (PyErr_Occurred()) { + return; + } +#endif + } + } +} +/**end repeat**/ + +/* + * vecmat loops, using blas gemv for float and gemm for complex if possible, + * and TYPE_dot[c] implementations otherwise. + * Note that we cannot use gemv for complex, since we need to conjugate the vector. + * signature is (n),(n,m)->(m) + */ +/**begin repeat + * #TYPE = FLOAT, DOUBLE, LONGDOUBLE, HALF, + * CFLOAT, CDOUBLE, CLONGDOUBLE, + * UBYTE, USHORT, UINT, ULONG, ULONGLONG, + * BYTE, SHORT, INT, LONG, LONGLONG, + * BOOL, OBJECT# + * #typ = npy_float,npy_double,npy_longdouble, npy_half, + * npy_cfloat, npy_cdouble, npy_clongdouble, + * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong, + * npy_byte, npy_short, npy_int, npy_long, npy_longlong, + * npy_bool, npy_object# + * #USEBLAS = 1, 1, 0, 0, 1, 1, 0*13# + * #COMPLEX = 0*4, 1*3, 0*11, 1# + * #DOT = dot*4, dotc*3, dot*11, dotc# + * #CHECK_PYERR = 0*18, 1# + */ +NPY_NO_EXPORT void +@TYPE@_vecmat(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) +{ + npy_intp n_outer = dimensions[0]; + npy_intp s0=steps[0], s1=steps[1], s2=steps[2]; + npy_intp dn = dimensions[1], dm = dimensions[2]; + npy_intp is1_n=steps[3], is2_n=steps[4], is2_m=steps[5], os_m=steps[6]; +#if @USEBLAS@ && defined(HAVE_CBLAS) + npy_bool too_big_for_blas = (dm > BLAS_MAXSIZE || dn > BLAS_MAXSIZE); + npy_bool i1_blasable = is_blasable2d(is1_n, sizeof(@typ@), dn, 1, sizeof(@typ@)); + npy_bool i2_c_blasable = is_blasable2d(is2_n, is2_m, dn, dm, sizeof(@typ@)); + npy_bool i2_f_blasable = is_blasable2d(is2_m, is2_n, dm, dn, sizeof(@typ@)); + npy_bool blasable = (i1_blasable && (i2_c_blasable || i2_f_blasable) + && !too_big_for_blas && dn > 1 && dm > 1); +#endif + for (npy_intp i = 0; i < n_outer; i++, + args[0] += s0, args[1] 
+= s1, args[2] += s2) { + char *ip1=args[0], *ip2=args[1], *op=args[2]; +#if @USEBLAS@ && defined(HAVE_CBLAS) + if (blasable) { +#if @COMPLEX@ + /* For complex, use gemm so we can conjugate the vector */ + @TYPE@_vecmat_via_gemm(ip1, is1_n, ip2, is2_n, is2_m, op, os_m, dn, dm); +#else + /* For float, use gemv (hence flipped order) */ + @TYPE@_gemv(ip2, is2_m, is2_n, ip1, is1_n, op, os_m, dm, dn); +#endif + continue; + } +#endif + /* Dot the vector with different matrix columns to get output elements. */ + for (npy_intp j = 0; j < dm; j++, ip2 += is2_m, op += os_m) { + @TYPE@_@DOT@(ip1, is1_n, ip2, is2_n, op, dn, NULL); +#if @CHECK_PYERR@ + if (PyErr_Occurred()) { + return; + } +#endif + } + } +} +/**end repeat**/ diff --git a/numpy/_core/src/umath/matmul.h.src b/numpy/_core/src/umath/matmul.h.src index df3f549a545a..bff3d73c8993 100644 --- a/numpy/_core/src/umath/matmul.h.src +++ b/numpy/_core/src/umath/matmul.h.src @@ -7,15 +7,10 @@ **/ NPY_NO_EXPORT void @TYPE@_matmul(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); -/**end repeat**/ - -/**begin repeat - * #TYPE = FLOAT, DOUBLE, LONGDOUBLE, HALF, - * CFLOAT, CDOUBLE, CLONGDOUBLE, - * UBYTE, USHORT, UINT, ULONG, ULONGLONG, - * BYTE, SHORT, INT, LONG, LONGLONG, - * BOOL, OBJECT# - */ NPY_NO_EXPORT void @TYPE@_vecdot(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +@TYPE@_matvec(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); +NPY_NO_EXPORT void +@TYPE@_vecmat(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); /**end repeat**/ diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 43037f20e2f6..b03eb02f0f51 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -824,20 +824,77 @@ def test_vecdot(self): actual3 = np.vecdot(arr1.astype("object"), arr2) assert_array_equal(actual3, 
expected.astype("object")) - def test_vecdot_complex(self): - arr1 = np.array([1, 2j, 3]) - arr2 = np.array([1, 2, 3]) + def test_matvec(self): + arr1 = np.arange(6).reshape((2, 3)) + arr2 = np.arange(3).reshape((1, 3)) + + actual = np.matvec(arr1, arr2) + expected = np.array([[5, 14]]) - actual = np.vecdot(arr1, arr2) - expected = np.array([10-4j]) assert_array_equal(actual, expected) - actual2 = np.vecdot(arr2, arr1) - assert_array_equal(actual2, expected.conj()) + actual2 = np.matvec(arr1.T, arr2.T, axes=[(-1, -2), -2, -1]) + assert_array_equal(actual2, expected) - actual3 = np.vecdot(arr1.astype("object"), arr2.astype("object")) + actual3 = np.matvec(arr1.astype("object"), arr2) assert_array_equal(actual3, expected.astype("object")) + @pytest.mark.parametrize("vec", [ + np.array([[1., 2., 3.], [4., 5., 6.]]), + np.array([[1., 2j, 3.], [4., 5., 6j]]), + np.array([[1., 2., 3.], [4., 5., 6.]], dtype=object), + np.array([[1., 2j, 3.], [4., 5., 6j]], dtype=object)]) + @pytest.mark.parametrize("matrix", [ + None, + np.array([[1.+1j, 0.5, -0.5j], + [0.25, 2j, 0.], + [4., 0., -1j]])]) + def test_vecmatvec_identity(self, matrix, vec): + """Check that (x†A)x equals x†(Ax).""" + mat = matrix if matrix is not None else np.eye(3) + matvec = np.matvec(mat, vec) # Ax + vecmat = np.vecmat(vec, mat) # x†A + if matrix is None: + assert_array_equal(matvec, vec) + assert_array_equal(vecmat.conj(), vec) + assert_array_equal(matvec, (mat @ vec[..., np.newaxis]).squeeze(-1)) + assert_array_equal(vecmat, (vec[..., np.newaxis].mT.conj() + @ mat).squeeze(-2)) + expected = np.einsum('...i,ij,...j', vec.conj(), mat, vec) + vec_matvec = (vec.conj() * matvec).sum(-1) + vecmat_vec = (vecmat * vec).sum(-1) + assert_array_equal(vec_matvec, expected) + assert_array_equal(vecmat_vec, expected) + + @pytest.mark.parametrize("ufunc, shape1, shape2, conj", [ + (np.vecdot, (3,), (3,), True), + (np.vecmat, (3,), (3, 1), True), + (np.matvec, (1, 3), (3,), False), + (np.matmul, (1, 3), (3, 1), False), + 
]) + def test_vecdot_matvec_vecmat_complex(self, ufunc, shape1, shape2, conj): + arr1 = np.array([1, 2j, 3]) + arr2 = np.array([1, 2, 3]) + + actual1 = ufunc(arr1.reshape(shape1), arr2.reshape(shape2)) + expected1 = np.array(((arr1.conj() if conj else arr1) * arr2).sum(), + ndmin=min(len(shape1), len(shape2))) + assert_array_equal(actual1, expected1) + # This would fail for conj=True, since matmul omits the conjugate. + if not conj: + assert_array_equal(arr1.reshape(shape1) @ arr2.reshape(shape2), + expected1) + + actual2 = ufunc(arr2.reshape(shape1), arr1.reshape(shape2)) + expected2 = np.array(((arr2.conj() if conj else arr2) * arr1).sum(), + ndmin=min(len(shape1), len(shape2))) + assert_array_equal(actual2, expected2) + + actual3 = ufunc(arr1.reshape(shape1).astype("object"), + arr2.reshape(shape2).astype("object")) + expected3 = expected1.astype(object) + assert_array_equal(actual3, expected3) + def test_vecdot_subclass(self): class MySubclass(np.ndarray): pass @@ -2757,9 +2814,9 @@ def test_ufunc_noncontiguous(ufunc): # bool, object, datetime are too irregular for this simple test continue inp, out = typ.split('->') - args_c = [np.empty(6, t) for t in inp] - # non contiguous (3 step) - args_n = [np.empty(18, t)[::3] for t in inp] + args_c = [np.empty((6, 6), t) for t in inp] + # non contiguous (2, 3 step on the two dimensions) + args_n = [np.empty((12, 18), t)[::2, ::3] for t in inp] # alignment != itemsize is possible. So create an array with such # an odd step manually. 
args_o = [] @@ -2767,10 +2824,9 @@ def test_ufunc_noncontiguous(ufunc): orig_dt = np.dtype(t) off_dt = f"S{orig_dt.alignment}" # offset by alignment dtype = np.dtype([("_", off_dt), ("t", orig_dt)], align=False) - args_o.append(np.empty(6, dtype=dtype)["t"]) - + args_o.append(np.empty((6, 6), dtype=dtype)["t"]) for a in args_c + args_n + args_o: - a.flat = range(1,7) + a.flat = range(1, 37) with warnings.catch_warnings(record=True): warnings.filterwarnings("always") @@ -2788,7 +2844,7 @@ def test_ufunc_noncontiguous(ufunc): # since different algorithms (libm vs. intrinsics) can be used # for different input strides res_eps = np.finfo(dt).eps - tol = 2*res_eps + tol = 3*res_eps assert_allclose(res_c, res_n, atol=tol, rtol=tol) assert_allclose(res_c, res_o, atol=tol, rtol=tol) else: diff --git a/numpy/_core/umath.py b/numpy/_core/umath.py index 8e51cd1694af..10278e52cbec 100644 --- a/numpy/_core/umath.py +++ b/numpy/_core/umath.py @@ -33,8 +33,8 @@ 'heaviside', 'hypot', 'invert', 'isfinite', 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', 'left_shift', 'less', 'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', - 'logical_or', 'logical_xor', 'maximum', 'minimum', 'mod', 'modf', + 'logical_or', 'logical_xor', 'matvec', 'maximum', 'minimum', 'mod', 'modf', 'multiply', 'negative', 'nextafter', 'not_equal', 'pi', 'positive', 'power', 'rad2deg', 'radians', 'reciprocal', 'remainder', 'right_shift', 'rint', 'sign', 'signbit', 'sin', 'sinh', 'spacing', 'sqrt', 'square', - 'subtract', 'tan', 'tanh', 'true_divide', 'trunc'] + 'subtract', 'tan', 'tanh', 'true_divide', 'trunc', 'vecdot', 'vecmat'] From 09cc67978b432b16a2f9e10a5314b47ae5e97fd7 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Mon, 25 Nov 2024 11:28:33 -0700 Subject: [PATCH 009/187] DOC: Correct versionadded for vecmat and matvec. 
[skip cirrus] [skip azp] [skip actions] --- numpy/_core/code_generators/ufunc_docstrings.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index b157eb0683c6..c9ef4b8d533b 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -2906,6 +2906,8 @@ def add_newdoc(place, name, doc): where :math:`\\overline{a_i}` denotes the complex conjugate if :math:`a_i` is complex and the identity otherwise. + .. versionadded:: 2.0.0 + Parameters ---------- x1, x2 : array_like @@ -2951,7 +2953,6 @@ def add_newdoc(place, name, doc): >>> np.vecdot(v, n) array([ 3., 8., 10.]) - .. versionadded:: 2.0.0 """) add_newdoc('numpy._core.umath', 'matvec', @@ -2969,6 +2970,8 @@ def add_newdoc(place, name, doc): (unless ``axes`` is specified). (For a matrix-vector product with the vector conjugated, use ``np.vecmat(x2, x1.mT)``.) + .. versionadded:: 2.2.0 + Parameters ---------- x1, x2 : array_like @@ -3017,7 +3020,6 @@ def add_newdoc(place, name, doc): [ 0., 0., 1.], [ 6., 0., 8.]]) - .. versionadded:: 2.1.0 """) add_newdoc('numpy._core.umath', 'vecmat', @@ -3037,6 +3039,8 @@ def add_newdoc(place, name, doc): is complex and the identity otherwise. (For a non-conjugated vector-matrix product, use ``np.matvec(x2.mT, x1)``.) + .. versionadded:: 2.2.0 + Parameters ---------- x1, x2 : array_like @@ -3080,7 +3084,6 @@ def add_newdoc(place, name, doc): >>> np.vecmat(v, a) array([ 0., 4., 0.]) - .. versionadded:: 2.1.0 """) add_newdoc('numpy._core.umath', 'modf', From 99cc034dc4b01fe184d72990753ac37ca7e89ba1 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 1 Nov 2024 21:48:16 -0600 Subject: [PATCH 010/187] REL: Prepare for the NumPy 2.2.0rc1 release [wheel build] - Create 2.2.0-changelog.rst. - Update 2.2.0-notes.rst. - Update .mailmap. 
- Update pyproject.toml - Delete release note snippets --- .mailmap | 38 +- doc/changelog/2.2.0-changelog.rst | 428 ++++++++++++++++++ .../upcoming_changes/14622.improvement.rst | 4 - .../upcoming_changes/25675.new_feature.rst | 20 - doc/release/upcoming_changes/26766.change.rst | 2 - doc/release/upcoming_changes/27088.change.rst | 2 - .../upcoming_changes/27119.performance.rst | 4 - .../upcoming_changes/27147.performance.rst | 8 - doc/release/upcoming_changes/27156.change.rst | 9 - .../upcoming_changes/27160.expired.rst | 2 - doc/release/upcoming_changes/27334.change.rst | 9 - .../upcoming_changes/27420.new_feature.rst | 2 - doc/release/upcoming_changes/27482.change.rst | 8 - .../upcoming_changes/27636.improvement.rst | 3 - .../upcoming_changes/27661.compatibility.rst | 5 - .../upcoming_changes/27695.improvement.rst | 5 - .../upcoming_changes/27723.improvement.rst | 4 - .../upcoming_changes/27735.deprecation.rst | 2 - .../upcoming_changes/27735.new_feature.rst | 4 - .../upcoming_changes/27736.new_feature.rst | 3 - doc/release/upcoming_changes/27807.change.rst | 4 - .../upcoming_changes/27808.performance.rst | 2 - doc/source/release/2.2.0-notes.rst | 200 +++++++- pyproject.toml | 2 +- tools/changelog.py | 2 +- 25 files changed, 652 insertions(+), 120 deletions(-) create mode 100644 doc/changelog/2.2.0-changelog.rst delete mode 100644 doc/release/upcoming_changes/14622.improvement.rst delete mode 100644 doc/release/upcoming_changes/25675.new_feature.rst delete mode 100644 doc/release/upcoming_changes/26766.change.rst delete mode 100644 doc/release/upcoming_changes/27088.change.rst delete mode 100644 doc/release/upcoming_changes/27119.performance.rst delete mode 100644 doc/release/upcoming_changes/27147.performance.rst delete mode 100644 doc/release/upcoming_changes/27156.change.rst delete mode 100644 doc/release/upcoming_changes/27160.expired.rst delete mode 100644 doc/release/upcoming_changes/27334.change.rst delete mode 100644 
doc/release/upcoming_changes/27420.new_feature.rst delete mode 100644 doc/release/upcoming_changes/27482.change.rst delete mode 100644 doc/release/upcoming_changes/27636.improvement.rst delete mode 100644 doc/release/upcoming_changes/27661.compatibility.rst delete mode 100644 doc/release/upcoming_changes/27695.improvement.rst delete mode 100644 doc/release/upcoming_changes/27723.improvement.rst delete mode 100644 doc/release/upcoming_changes/27735.deprecation.rst delete mode 100644 doc/release/upcoming_changes/27735.new_feature.rst delete mode 100644 doc/release/upcoming_changes/27736.new_feature.rst delete mode 100644 doc/release/upcoming_changes/27807.change.rst delete mode 100644 doc/release/upcoming_changes/27808.performance.rst diff --git a/.mailmap b/.mailmap index 23a556dd9fc4..ee897a292229 100644 --- a/.mailmap +++ b/.mailmap @@ -25,6 +25,8 @@ !dg3192 <113710955+dg3192@users.noreply.github.com> !ellaella12 !ellaella12 <120079323+ellaella12@users.noreply.github.com> +!fengluoqiuwu +!fengluoqiuwu <163119756+fengluoqiuwu@users.noreply.github.com> !h-vetinari !h6197627 <44726212+h6197627@users.noreply.github.com> !jbCodeHub @@ -58,6 +60,7 @@ !yetanothercheer Aaron Baecker Adrin Jalali +Abraham Medina Arun Kota Arun Kota Arun Kota Aarthi Agurusa @@ -142,6 +145,7 @@ Ashutosh Singh Ashutosh Singh <55102089+Ashutosh619-sudo@users.noreply.github.com> Åsmund Hjulstad Auke Wiggers +Austin Ran <504977925@qq.com> Badhri Narayanan Krishnakumar Bhavuk Kalra Bhavuk Kalra @@ -154,6 +158,7 @@ Ben Woodruff Benjamin Root Benjamin Root weathergod Bernardt Duvenhage +Benoit Prabel Bernie Gray Bertrand Lefebvre Bharat Raghunathan @@ -193,6 +198,7 @@ Chris Vavaliaris Christian Clauss Christopher Dahlin Christopher Hanley +Christoph Buchner Christoph Gohlke Christoph Gohlke Christoph Gohlke cgholke @@ -288,6 +294,8 @@ Gregory R. Lee Gregory R. 
Lee Guo Ci guoci Guo Shuai +Habiba Hye +Habiba Hye <145866308+HabibiHye@users.noreply.github.com> Hameer Abbasi Hannah Aizenman Han Genuit @@ -300,11 +308,13 @@ Hiroyuki V. Yamazaki Hugo van Kemenade Iantra Solari I-Shen Leong +Ishan Purekar Imen Rajhi Inessa Pawson Irina Maria Mocan <28827042+IrinaMaria@users.noreply.github.com> Irvin Probst -Ishan Koradia <39583356+Ishankoradia@users.noreply.github.com> +Ishan Koradia +Ishan Koradia <39583356+Ishankoradia@users.noreply.github.com> Ivan Meleshko Isabela Presedo-Floyd Ganesh Kathiresan @@ -345,32 +355,34 @@ Jérôme Richard Jessé Pires Jessi J Zhao <35235453+jessijzhao@users.noreply.github.com> -João Fontes Gonçalves -Johann Rohwer -Johann Rohwer jmrohwer -Johnathon Cusick Jhong-Ken Chen (陳仲肯) Jhong-Ken Chen (陳仲肯) <37182101+kennychenfs@users.noreply.github.com> +Johann Faouzi +Johann Rohwer +Johann Rohwer jmrohwer Johannes Hampp <42553970+euronion@users.noreply.github.com> +Johannes Kaisinger +Johannes Kaisinger Johannes Schönberger -Johann Faouzi John Darbyshire <24256554+attack68@users.noreply.github.com> <24256554+attack68@users.noreply.github.com> John Hagen John Kirkham John Kirkham +Johnathon Cusick Johnson Sun <20457146+j3soon@users.noreply.github.com> Jonas I. Liechti Jonas I. Liechti Jonas I. 
Liechti +Joren Hammudoglu +Jory Klaverstijn +Jory Klaverstijn <63673224+JoryKlaverstijn@users.noreply.github.com> Joseph Fox-Rabinovitz Joseph Fox-Rabinovitz Joseph Fox-Rabinovitz Joseph Martinot-Lagarde Joshua Himmens Joyce Brum -Joren Hammudoglu -Jory Klaverstijn -Jory Klaverstijn <63673224+JoryKlaverstijn@users.noreply.github.com> +João Fontes Gonçalves Julia Poo Julia Poo <57632293+JuliaPoo@users.noreply.github.com> Julian Taylor @@ -386,6 +398,7 @@ Kai Striega Kasia Leszek Kasia Leszek <39829548+katleszek@users.noreply.github.com> Karan Dhir +Karel Planken <71339309+kplanken@users.noreply.github.com> Karthik Gangula <56480632+gangula-karthik@users.noreply.github.com> Karthik Kaiplody Keller Meier @@ -398,6 +411,7 @@ Kerem Hallaç Khaled Ben Abdallah Okuda Kiko Correoso kikocorreoso Kiko Correoso kikocorreoso +Kira Prokopenko Konrad Kapp Kristoffer Pedersen Kristoffer Pedersen @@ -526,6 +540,7 @@ Omar Ali Omid Rajaei Omid Rajaei <89868505+rajaeinet@users.noreply.github.com> Ondřej Čertík +Oscar Armas-Luy Óscar Villellas Guillén Pablo Losada Pablo Losada <48804010+TheHawz@users.noreply.github.com> @@ -546,6 +561,7 @@ Pearu Peterson Pete Peeradej Tanruangporn Peter Bell Peter J Cock +Peter Kämpf Peyton Murray Phil Elson Pierre GM @@ -608,6 +624,7 @@ Sebastian Schleehauf Serge Guelton Sergei Vorfolomeev <39548292+vorfol@users.noreply.github.com> Shuangchi He +Shaurya Barkund <64537538+Shaurya19@users.noreply.github.com> Shubham Gupta Shubham Gupta <63910248+shubham11941140@users.noreply.github.com> Shekhar Prasad Rajak @@ -659,10 +676,13 @@ Toshiki Kataoka Travis Oliphant Travis Oliphant Travis Oliphant +Vahid Tavanashad <120411540+vtavana@users.noreply.github.com> Valentin Haenel Valentin Haenel Vardhaman Kalloli <83634399+cyai@users.noreply.github.com> Varun Nayyar +Victor Herdeiro +Vijayakumar Z Vinith Kishore Vinith Kishore <85550536+vinith2@users.noreply.github.com> Vrinda Narayan diff --git a/doc/changelog/2.2.0-changelog.rst 
b/doc/changelog/2.2.0-changelog.rst new file mode 100644 index 000000000000..2a00d67b5736 --- /dev/null +++ b/doc/changelog/2.2.0-changelog.rst @@ -0,0 +1,428 @@ +Generating change log for range v2.2.0.dev0^..HEAD + +Contributors +============ + +A total of 106 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !fengluoqiuwu + +* !h-vetinari +* Abhishek Kumar + +* Abraham Medina + +* Aditi Juneja + +* Adrien Corenflos + +* Agriya Khetarpal +* Ajay Kumar Janapareddi +* Akula Guru Datta + +* Amit Subhash Chejara + +* Andrew Nelson +* Anne Gunn +* Aarni Koskela + +* Austin Ran + +* Ben Walsh +* Benjamin A. Beasley + +* Benoit Prabel + +* Charles Harris +* Chris Fu (傅立业) +* Chris Sidebottom +* Christian Lorentzen +* Christopher Sidebottom +* Clément Robert +* Dane Reimers + +* Dimitri Papadopoulos Orfanos +* Dreamge + +* Evgeni Burovski +* GUAN MING +* Habiba Hye + +* Harry Zhang + +* Hugo van Kemenade +* Ian Harris + +* Isaac Warren + +* Ishan Koradia + +* Ishan Purekar + +* Jake VanderPlas +* Jianyu Wen + +* Johannes Kaisinger +* John Kirkham +* Joren Hammudoglu +* João Eiras + +* KM Khalid Saifullah + +* Karel Planken + +* Katie Rust + +* Khem Raj +* Kira Prokopenko + +* Lars Grüter +* Linus Sommer +* Lucas Colley +* Luiz Eduardo Amaral +* Luke Aarohi + +* Marcel Telka + +* Mark Harfouche +* Marten van Kerkwijk +* Maryanne Wachter + +* Mateusz Sokół +* Matt Haberland +* Matthias Diener + +* Matthieu Darbois +* Matti Picus +* Maximilian Weigand + +* Melissa Weber Mendonça +* Michael Davidsaver + +* Nathan Goldbaum +* Nicolas Tessore + +* Nitish Satyavolu + +* Oscar Armas-Luy + +* Peter Hawkins +* Peter Kämpf + +* Pieter Eendebak +* Raghu Rajan + +* Raghuveer Devulapalli +* Ralf Gommers +* Robert Kern +* Rohit Goswami +* Ross Barnowski +* Ryan Teoh + +* Santhana Mikhail Antony S + +* Sayed Adel +* Sebastian Berg +* Sebastian Vittersø + +* Sebin Thomas + +* Serge Panev + +* Shaurya Barkund + +* Shiv Katira 
+ +* Simon Altrogge +* Slava Gorloff + +* Slobodan Miletic + +* Soutrik Bandyopadhyay + +* Stan U. + +* Stefan van der Walt +* Tim Hoffmann +* Timo Röhling +* Tyler Reddy +* Vahid Tavanashad + +* Victor Herdeiro + +* Vijayakumar Z + +* Warren Weckesser +* Xiao Yuan + +* Yashasvi Misra +* bersbersbers + +* bilderbuchi + +* dependabot[bot] +* hutauf + +* musvaage + +* nullSoup + + +Pull requests merged +==================== + +A total of 307 pull requests were merged for this release. + +* `#14622 `__: BUG: fix datetime64/timedelta64 hash and match Python +* `#15181 `__: ENH: Add nd-support to trim_zeros +* `#17780 `__: ENH, BLD: Define RISCV-32 support +* `#23547 `__: DOC: Fix a typo in description and add an example of ``numpy.tensordot`` +* `#25984 `__: BUG: Allow fitting of degree zero polynomials with Polynomial.fit +* `#26398 `__: DOC: order of indices returned in tril_indices and triu_indices +* `#26406 `__: DOC: Changed vdot docs as suggested +* `#26570 `__: CI, BLD: Use ``cibuildwheel`` to build WASM NumPy wheels +* `#26642 `__: DOC: Add examples to ``np.char`` +* `#26855 `__: TYP: improved ``numpy.frompyfunc`` type hints +* `#26857 `__: MAINT: Start applying ruff/Pycodestyle rules +* `#26865 `__: TYP: add missing annotations for ``numpy.object_.__new__`` +* `#26941 `__: TYP: Non-distributive ``numpy.generic`` type args. +* `#26944 `__: TYP: Annotate ``numpy._core._type_aliases`` . 
+* `#26979 `__: TYP: Explicit ``numpy.__all__`` in the stubs +* `#26994 `__: TYP: Typing fixes for ``numpy.iinfo`` & ``numpy.finfo`` +* `#27049 `__: BUG: f2py: better handle filtering of public/private subroutines +* `#27088 `__: WHL: bump (musl) linux image [wheel build] +* `#27100 `__: TYP: Fixed & improved type hints for ``numpy.histogram2d`` +* `#27101 `__: TST, DOC: add doc and test for transpose axes with negative indices +* `#27116 `__: DOC: update NEP 50 draft status to "Final" +* `#27119 `__: ENH: Use ``PyObject_GetOptionalAttr`` +* `#27132 `__: TYP: Assume that ``typing_extensions`` is always available in... +* `#27134 `__: REL: Prepare main for 2.2.0 development +* `#27139 `__: TYP: Fixed & improved ``numpy.dtype.__new__`` +* `#27140 `__: MAINT: Scipy openblas 0.3.27.44.4 +* `#27143 `__: BUG: Do not accidentally store dtype metadata in ``np.save`` +* `#27145 `__: ENH: fix thread-unsafe C API usages +* `#27147 `__: BLD: use smaller scipy-openblas builds +* `#27148 `__: BUG: Raise if histogram cannot create finite bin sizes +* `#27150 `__: TYP: Sane defaults for the platform-specific ``NBitBase`` types. +* `#27152 `__: TYP: Simplified ufunc imports in ``numpy._typing`` +* `#27153 `__: TYP: Fix incompatible overrides in the ``numpy._typing._ufunc``... 
+* `#27154 `__: TYP: Use ``typing_extensions.Self`` in the ``numpy`` stubs +* `#27156 `__: MAINT: Remove any promotion-state switching logic +* `#27157 `__: TYP: add td64 overload for ``np.mean`` +* `#27158 `__: CI: Re-enable nightly OpenBLAS test runs +* `#27160 `__: DEP: Finalize ``bool(empty_array)`` deprecation +* `#27164 `__: MAINT: use npy_argparse for einsum +* `#27168 `__: DOC: add td64 example in ``np.mean`` +* `#27171 `__: TYP: Shape-typed array constructors: ``numpy.{empty,zeros,ones,full}`` +* `#27177 `__: TYP: 1-d ``numpy.arange`` return shape-type +* `#27178 `__: TYP,TST: Bump mypy to 1.11.1 +* `#27179 `__: TYP: Improved ``numpy.piecewise`` type-hints +* `#27182 `__: REV: Revert undef I and document it +* `#27184 `__: BUILD: update to OpenBLAS 0.3.28 +* `#27187 `__: MAINT: update default NPY_FEATURE_VERSION after dropping py39 +* `#27189 `__: MAINT: improve download script +* `#27202 `__: BUG: Fix NPY_RAVEL_AXIS on backwards compatible NumPy 2 builds +* `#27203 `__: DOC: update PyArray_CheckAxis doc +* `#27207 `__: TYP: Deprecate calling ``numpy.save`` with ``fix_imports`` (PEP... +* `#27208 `__: TYP: Disallow scalars and 0d-arrays in ``numpy.nonzero`` +* `#27210 `__: TYP: Semi-transparent ``numpy.shape`` shape-type annotations. +* `#27211 `__: TYP: Stop using ``Any`` as shape-type default +* `#27215 `__: MAINT: Bump github/codeql-action from 3.26.0 to 3.26.2 +* `#27218 `__: DEV: Add ``.editorconfig`` rules for Python +* `#27219 `__: TYP: Replace ``ellipsis`` with ``types.EllipsisType`` +* `#27220 `__: TYP: Fixed & improved ``TypeVar`` use for ``numpy.char.chararray`` +* `#27221 `__: MAINT: Bump actions/upload-artifact from 4.3.3 to 4.3.6 +* `#27223 `__: DOC: add docs on thread safety in NumPy +* `#27226 `__: BUG: Fix ``PyArray_ZeroContiguousBuffer`` (resize) with struct... +* `#27228 `__: DOC: Remove obsolete note from the top of the 2.0.0 release notes. 
+* `#27235 `__: MAINT: MSVC does not support #warning directive +* `#27237 `__: TYP: Fix several typing issues in ``numpy.polynomial`` +* `#27238 `__: DOC: update ``np.unique`` docstring +* `#27242 `__: MAINT: Update main after 2.1.0 release. +* `#27246 `__: MAINT: Bump github/codeql-action from 3.26.2 to 3.26.3 +* `#27247 `__: DOC: update documentation release process +* `#27249 `__: BUG: fix reference counting bug in __array_interface__ implementation +* `#27255 `__: BUG: revert unintended change in the return value of set_printoptions +* `#27261 `__: TST: Add regression test for missing descr in array-interface +* `#27262 `__: BUG: Fix #27256 and #27257 +* `#27268 `__: MAINT: Bump github/codeql-action from 3.26.3 to 3.26.4 +* `#27272 `__: ENH: make check-{docs,tutorials} fail on dtype mismatch +* `#27275 `__: BUG: Fix array_equal for numeric and non-numeric scalar types +* `#27277 `__: DOC/DEV/CI: mambaforge -> miniforge +* `#27281 `__: MAINT: Bump github/codeql-action from 3.26.4 to 3.26.5 +* `#27284 `__: BLD: cp311- macosx_arm64 wheels [wheel build] +* `#27286 `__: MAINT: Update main after the 2.0.2 release +* `#27289 `__: MAINT: Start applying ruff rules (RUF) +* `#27290 `__: MAINT: Keep applying ruff/pyupgrade rules (UP) +* `#27291 `__: DOC, MAINT: Fix new typos found by codespell +* `#27292 `__: MAINT: Start applying ruff/flake8-type-checking rules (TCH) +* `#27293 `__: MAINT: Keep applying ruff/flake8-bugbear rules (B) +* `#27294 `__: BUILD: refactor circleci to use spin [skip actions][skip azp][skip... 
+* `#27295 `__: MAINT: Start applying rruff/flake8-pie rules (PIE) +* `#27296 `__: MAINT: Start applying ruff/flake8-comprehensions rules (C4) +* `#27297 `__: MAINT: Apply ruff/flake8-raise rules (RSE) +* `#27298 `__: MAINT: Apply ruff/flynt rules (FLY) +* `#27302 `__: BUG: Fix bug in ``doc/neps/tools/build_index.py`` +* `#27307 `__: MAINT: Apply ruff/pycodestyle warning rules (W) +* `#27311 `__: MAINT: Bump actions/setup-python from 5.1.1 to 5.2.0 +* `#27312 `__: MAINT: Bump github/codeql-action from 3.26.5 to 3.26.6 +* `#27316 `__: BUILD: update pypy test version +* `#27320 `__: MAINT: increase max line length from 79 to 88, upgrade pycodestyle +* `#27322 `__: DOC: Removed reference to deprecated "newshape" parameter in... +* `#27323 `__: TYP: add ``ma.zeros_like`` and ``ma.ones_like`` typing +* `#27326 `__: MAINT: Bump actions/upload-artifact from 4.3.6 to 4.4.0 +* `#27330 `__: BLD: Win-arm64 cross compile workflow +* `#27331 `__: MAINT: GitHub Actions: Replace deprecated macos-12 with macos-latest +* `#27332 `__: MAINT: Update main after 2.1.1 release. +* `#27334 `__: TYP: Concrete ``float64`` and ``complex128`` scalar types with... +* `#27335 `__: ENH: Add ``allow_pickle`` flag to ``savez`` +* `#27344 `__: MAINT: fix typos +* `#27346 `__: BUG,TYP: Allow subscripting ``iinfo`` and ``finfo`` generic types... +* `#27347 `__: DOC: Mention that c is reassigned but still points to a (quickstart) +* `#27353 `__: MNT, CI: Use separate jobs for WASM wheel builds/uploads +* `#27355 `__: MAINT: Bump actions/setup-python from 5.1.1 to 5.2.0 +* `#27356 `__: MAINT: Bump actions/upload-artifact from 4.3.6 to 4.4.0 +* `#27359 `__: MAINT: fix typo in random.binomial +* `#27360 `__: BUG: fix _shrink edge case in np.ma.mask_or +* `#27361 `__: BUILD: fix missing include for std::ptrdiff_t for C++23 language... 
+* `#27363 `__: DOC: Remove reshape from appearing twice in toctree +* `#27364 `__: DOC: Update np.\*stack doc to reflect behavior +* `#27365 `__: MAINT: Bump deadsnakes/action from 3.1.0 to 3.2.0 +* `#27369 `__: DOC: fix incorrect definitions +* `#27372 `__: CI: Update cirrus nightly token +* `#27376 `__: MAINT: Fix a few typos - and sometimes improve wording +* `#27381 `__: DOC: add vecdot to 'See also' of np.dot and np.inner +* `#27384 `__: MAINT: Fix a few more typos +* `#27385 `__: DOC: Update np.unique_all example to demonstrate namedtuple output +* `#27387 `__: DOC: Clarify np.searchsorted documentation and add example for... +* `#27390 `__: MAINT: Bump github/codeql-action from 3.26.6 to 3.26.7 +* `#27391 `__: MAINT: Bump pypa/cibuildwheel from 2.20.0 to 2.21.0 +* `#27392 `__: BUG: apply critical sections around populating the dispatch cache +* `#27403 `__: DOC: Fix minor issues in arrays.promotion.rst +* `#27406 `__: BUG: Stub out ``get_build_msvc_version`` if ``distutils.msvccompiler``... +* `#27408 `__: DOC: more informative _excluded_ argument explanation in np.vectorize +* `#27412 `__: MAINT: Bump pypa/cibuildwheel from 2.21.0 to 2.21.1 +* `#27414 `__: MAINT: add Python 3.13 to classifiers +* `#27417 `__: TYP: Allow callable ``converters`` arg in ``numpy.loadtxt`` +* `#27418 `__: TYP: Fix default return dtype of ``numpy.random.Generator.integers``... 
+* `#27419 `__: TYP: Modernized ``numpy.dtypes`` annotations +* `#27420 `__: TYP: Optional 2nd ``numpy.complexfloating`` type parameter +* `#27421 `__: BUG: Add regression test for gh-27273 +* `#27423 `__: TYP: Add missing type arguments +* `#27424 `__: DOC: Add release notes for #27334 +* `#27425 `__: MAINT: Use correct Python interpreter in tests +* `#27426 `__: MAINT: Bump github/codeql-action from 3.26.7 to 3.26.8 +* `#27427 `__: TYP: Fixed & improved type-hinting for ``any`` and ``all`` +* `#27429 `__: BLD: pin setuptools to avoid breaking numpy.distutils +* `#27430 `__: TYP: Fix type of ``copy`` argument in ``ndarray.reshape`` +* `#27431 `__: BUG: Allow unsigned shift argument for np.roll +* `#27434 `__: ENH: make np.dtype(scalar_type) return the default dtype instance +* `#27438 `__: BUG: Disable SVE VQSort +* `#27440 `__: DOC: Add a link to the migration guide for the deprecation warning... +* `#27441 `__: DOC: remove old versionadded comments from arrays.classes.rst +* `#27442 `__: DOC: Remove old versionchanged directives from config.rst +* `#27443 `__: updated the version of mean param from the release notes (2.0.0) +* `#27444 `__: TST: Added the test case for masked array tofile failing +* `#27445 `__: DOC: removed older versionadded directives to ufuncs.rst +* `#27448 `__: DOC: Example for char.array +* `#27453 `__: DOC: Added docstring for numpy.ma.take() function. +* `#27454 `__: DOC: Remove outdated versionadded/changed directives +* `#27458 `__: MAINT: Bump github/codeql-action from 3.26.8 to 3.26.9 +* `#27464 `__: DOC: Fix a copy-paste mistake in the cumulative_sum docstring. +* `#27465 `__: DOC: update ndindex reference in np.choose docstring +* `#27466 `__: BUG: rfftn axis bug +* `#27469 `__: DOC: Added ``CONTRIBUTING.rst`` +* `#27470 `__: TYP: Add type stubs for stringdtype in np.char and np.strings +* `#27472 `__: MAINT: Check for SVE support on demand +* `#27475 `__: CI: use PyPI not scientific-python-nightly-wheels for CI doc... 
+* `#27478 `__: BUG: Fix extra decref of PyArray_UInt8DType. +* `#27482 `__: Show shape any time it cannot be inferred in repr +* `#27485 `__: MAINT: Bump github/codeql-action from 3.26.9 to 3.26.10 +* `#27486 `__: MAINT: Bump scientific-python/upload-nightly-action from 0.5.0... +* `#27490 `__: API: register NEP 35 functions as array_functions +* `#27491 `__: MAINT: Bump mamba-org/setup-micromamba from 1.9.0 to 1.10.0 +* `#27495 `__: MAINT: Bump pypa/cibuildwheel from 2.21.1 to 2.21.2 +* `#27496 `__: MAINT: Bump mamba-org/setup-micromamba from 1.10.0 to 2.0.0 +* `#27497 `__: DOC: Correct selected C docstrings to eliminate warnings +* `#27499 `__: DOC: fix missing arguments (copy and device) from asanyarray's... +* `#27502 `__: MAINT: Bump github/codeql-action from 3.26.10 to 3.26.11 +* `#27503 `__: BUG: avoid segfault on bad arguments in ndarray.__array_function__ +* `#27504 `__: ENH: Allow ``ndarray.__array_function__`` to dispatch functions... +* `#27508 `__: MAINT: Pin setuptools for testing [wheel build] +* `#27510 `__: TYP: Mark stub-only classes as ``@type_check_only`` +* `#27511 `__: TYP: Annotate type aliases without annotation +* `#27513 `__: MAINT: Update main after NumPy 2.1.2 release +* `#27517 `__: BENCH: Add benchmarks for np.non_zero +* `#27518 `__: TST: Add tests for np.nonzero with different input types +* `#27520 `__: TYP: Remove unused imports in the stubs +* `#27521 `__: TYP: Fill in the missing ``__all__`` exports +* `#27524 `__: MAINT: Bump actions/cache from 4.0.2 to 4.1.0 +* `#27525 `__: MAINT: Bump actions/upload-artifact from 4.4.0 to 4.4.1 +* `#27526 `__: MAINT: Bump github/codeql-action from 3.26.11 to 3.26.12 +* `#27532 `__: MAINT: Bump actions/cache from 4.1.0 to 4.1.1 +* `#27534 `__: BUG: Fix user dtype can-cast with python scalar during promotion +* `#27535 `__: MAINT: Bump pypa/cibuildwheel from 2.21.2 to 2.21.3 +* `#27536 `__: MAINT: Bump actions/upload-artifact from 4.4.1 to 4.4.3 +* `#27549 `__: BUG: weighted quantile for some 
zero weights +* `#27550 `__: BLD: update vendored Meson to 1.5.2 +* `#27551 `__: MAINT: Bump github/codeql-action from 3.26.12 to 3.26.13 +* `#27553 `__: BLD: rename ``meson_options.txt`` to ``meson.options`` +* `#27555 `__: DEV: bump ``python`` to 3.12 in environment.yml +* `#27556 `__: DOC: Clarify use of standard deviation in mtrand.pyx +* `#27557 `__: BUG: Fix warning "differs in levels of indirection" in npy_atomic.h... +* `#27558 `__: MAINT: distutils: remove obsolete search for ``ecc`` executable +* `#27560 `__: CI: start building Windows free-threaded wheels +* `#27564 `__: BUILD: satisfy gcc-13 pendantic errors +* `#27567 `__: BUG: handle possible error for PyTraceMallocTrack +* `#27568 `__: BUILD: vendor tempita from Cython +* `#27579 `__: BUG: Adjust numpy.i for SWIG 4.3 compatibility +* `#27586 `__: MAINT: Update Highway to latest +* `#27587 `__: BLD: treat SVML object files better to avoid compiler warnings +* `#27595 `__: DOC: Clarify obj parameter types in numpy.delete documentation +* `#27598 `__: DOC: add examples to ctypeslib +* `#27602 `__: Update documentation for floating-point precision and determinant... +* `#27604 `__: DOC: Fix rendering in docstring of nan_to_num +* `#27612 `__: ENH: Add comments to ``string_fastsearch.h`` , rename some C-methods +* `#27613 `__: BUG: Fix Linux QEMU CI workflow +* `#27615 `__: ENH: Fix np.insert to handle boolean arrays as masks +* `#27617 `__: DOC: Update the RELEASE_WALKTHROUGH.rst file. 
+* `#27619 `__: MAINT: Bump actions/cache from 4.1.1 to 4.1.2 +* `#27620 `__: MAINT: Bump actions/dependency-review-action from 4.3.4 to 4.3.5 +* `#27621 `__: MAINT: Bump github/codeql-action from 3.26.13 to 3.27.0 +* `#27627 `__: ENH: Re-enable VSX from build targets for sin/cos +* `#27630 `__: ENH: Extern memory management to Cython +* `#27634 `__: MAINT: Bump actions/setup-python from 5.2.0 to 5.3.0 +* `#27636 `__: BUG: fixes for StringDType/unicode promoters +* `#27643 `__: BUG : avoid maximum fill value of datetime and timedelta return... +* `#27644 `__: DOC: Remove ambiguity in docs for ndarray.byteswap() +* `#27650 `__: BLD: Do not set __STDC_VERSION__ to zero during build +* `#27652 `__: TYP,TST: Bump ``mypy`` from ``1.11.1`` to ``1.13.0`` +* `#27653 `__: TYP: Fix Array API method signatures +* `#27659 `__: TYP: Transparent ``ndarray`` unary operator method signatures +* `#27661 `__: BUG: np.cov transpose control +* `#27663 `__: MAINT: fix wasm32 runtime type error in numpy._core +* `#27664 `__: MAINT: Bump actions/dependency-review-action from 4.3.5 to 4.4.0 +* `#27665 `__: ENH: Re-enable VXE from build targets for sin/cos +* `#27666 `__: BUG: Fix a reference count leak in npy_find_descr_for_scalar. +* `#27667 `__: TYP: Allow returning non-array-likes from the ``apply_along_axis``... +* `#27676 `__: CI: Attempt to fix CI on 32 bit linux +* `#27678 `__: DOC: fix incorrect versionadded for np.std +* `#27680 `__: MAINT: fix typo / copy paste error +* `#27681 `__: TYP: Fix some inconsistencies in the scalar methods and properties +* `#27683 `__: TYP: Improve ``np.sum`` and ``np.mean`` return types with given... +* `#27684 `__: DOC: fix spelling of "reality" in ``_nanfunctions_impl.pyi`` +* `#27685 `__: MAINT: Drop useless shebang +* `#27691 `__: TYP: Use ``_typeshed`` to clean up the stubs +* `#27693 `__: MAINT: Update main after 2.1.3 release. 
+* `#27695 `__: BUG: Fix multiple modules in F2PY and COMMON handling +* `#27702 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.0.4 to 3.1.0 +* `#27705 `__: MAINT: Bump mamba-org/setup-micromamba from 2.0.0 to 2.0.1 +* `#27706 `__: DOC: Remove empty notes +* `#27707 `__: CI: Set up free-threaded CI using quansight-labs/setup-python +* `#27708 `__: DOC: Remove version notes +* `#27714 `__: DOC: fix a mistake in the docstring of vector_norm +* `#27715 `__: BUG: fix incorrect output descriptor in fancy indexing +* `#27716 `__: ENH: Make ``__module__`` attribute coherent across API +* `#27721 `__: DOC: fix name of shape parameter kappa of von Mises distribution +* `#27723 `__: BUG: Allow empty memmaps in most situations +* `#27724 `__: MAINT: Bump github/codeql-action from 3.27.0 to 3.27.1 +* `#27728 `__: BUG: Handle ``--lower`` for F2PY directives and callbacks +* `#27729 `__: BUG: f2py: fix issues with thread-local storage define +* `#27730 `__: TST: Add an F2PY check for exposing variables without functions +* `#27731 `__: BUG: Fix ``fortranname`` for functions +* `#27734 `__: Fix documentation for the chi-square distribution +* `#27735 `__: ENH: Add a ``__dict__`` to ufunc objects and allow overriding... +* `#27736 `__: TYP: Optional ``numpy.number`` type parameters +* `#27742 `__: MAINT: Bump github/codeql-action from 3.27.1 to 3.27.2 +* `#27743 `__: DOC: Fix typos in subclassing documentation +* `#27746 `__: DOC: Added additional guidance for compiling in Windows +* `#27750 `__: TYP: Fix ``ndarray.item()`` and improve ``ndarray.tolist()`` +* `#27753 `__: TYP: Fix the annotations of ``ndarray.real`` and ``ndarray.imag`` +* `#27754 `__: MAINT: Bump github/codeql-action from 3.27.2 to 3.27.3 +* `#27755 `__: TYP: Annotate ``__setitem__`` , ``__contains__`` and ``__iter__``... +* `#27756 `__: TYP: 1-d shape-typing for ``ndarray.flatten`` and ``ravel`` +* `#27757 `__: TYP: Remove the non-existent ``bitwise_count`` methods of ``ndarray``... 
+* `#27758 `__: TYP: Remove ``ndarray`` binop overloads for ``NDArray[Never]`` +* `#27763 `__: DOC: Note that allow-pickle is not safe also in error +* `#27765 `__: TYP: Shape-typed ``ndarray`` inplace binary operator methods. +* `#27766 `__: MAINT: Bump github/codeql-action from 3.27.3 to 3.27.4 +* `#27767 `__: TYP: Support shape-typing in ``reshape`` and ``resize`` +* `#27769 `__: TYP: Towards a less messy ``__init__.pyi`` +* `#27770 `__: TYP: Fix incorrect baseclass of ``linalg.LinAlgError`` +* `#27771 `__: ENH: ``default_rng`` coerces ``RandomState`` to ``Generator`` +* `#27773 `__: BUG: Fix repeat, accumulate for strings and accumulate API logic +* `#27775 `__: TYP: Fix undefined type-parameter name +* `#27776 `__: TYP: Fix method overload issues in ``ndarray`` and ``generic`` +* `#27778 `__: TYP: Generic ``numpy.generic`` type parameter for the ``item()``... +* `#27779 `__: TYP: Type hints for ``numpy.__config__`` +* `#27788 `__: DOC: Make wording in absolute beginners guide more beginner friendly +* `#27790 `__: TYP: Generic ``timedelta64`` and ``datetime64`` scalar types +* `#27792 `__: TYP: Generic ``numpy.bool`` and statically typed boolean logic +* `#27794 `__: MAINT: Upgrade to spin 0.13 +* `#27795 `__: update pythoncapi-compat to latest HEAD +* `#27800 `__: BUG: Ensure context path is taken in masked array array-wrap +* `#27802 `__: BUG: Ensure that same-kind casting works for uints (mostly) +* `#27803 `__: MAINT: Bump github/codeql-action from 3.27.4 to 3.27.5 +* `#27806 `__: DOC: Improve choice() documentation about return types +* `#27807 `__: BUG,ENH: Fix internal ``__array_wrap__`` for direct calls +* `#27808 `__: ENH: Ensure hugepages are also indicated for calloc allocations +* `#27809 `__: BUG: Fix array flags propagation in boolean indexing +* `#27810 `__: MAINT: Bump actions/dependency-review-action from 4.4.0 to 4.5.0 +* `#27812 `__: BUG: ``timedelta64.__[r]divmod__`` segfaults for incompatible... 
+* `#27813 `__: DOC: fix broken reference in arrays.classes.rst +* `#27815 `__: DOC: Add a release fragment for gh-14622 +* `#27816 `__: MAINT: Fixup that spin can be installed via conda too now +* `#27817 `__: DEV: changelog: make title processing more robust +* `#27828 `__: CI: skip ninja installation in linux_qemu workflows +* `#27829 `__: CI: update circleci to python3.11.10, limit parallel builds.... +* `#27831 `__: BUG: Fix mismatch in definition and declaration for a couple... +* `#27843 `__: DOC: Correct version-added for mean arg for nanvar and nanstd +* `#27845 `__: BUG: Never negate strides in reductions (for now) +* `#27846 `__: ENH: add matvec and vecmat gufuncs +* `#27852 `__: DOC: Correct versionadded for vecmat and matvec. + diff --git a/doc/release/upcoming_changes/14622.improvement.rst b/doc/release/upcoming_changes/14622.improvement.rst deleted file mode 100644 index 3a3cd01f305d..000000000000 --- a/doc/release/upcoming_changes/14622.improvement.rst +++ /dev/null @@ -1,4 +0,0 @@ -* The ``datetime64`` and ``timedelta64`` hashes now - correctly match the Pythons builtin ``datetime`` and - ``timedelta`` ones. The hashes now evaluated equal - even for equal values with different time units. diff --git a/doc/release/upcoming_changes/25675.new_feature.rst b/doc/release/upcoming_changes/25675.new_feature.rst deleted file mode 100644 index f048e1f1020a..000000000000 --- a/doc/release/upcoming_changes/25675.new_feature.rst +++ /dev/null @@ -1,20 +0,0 @@ -New functions for matrix-vector and vector-matrix products ----------------------------------------------------------- - -Two new generalized ufuncs were defined: - -* `numpy.matvec` - matrix-vector product, treating the arguments as - stacks of matrices and column vectors, respectively. - -* `numpy.vecmat` - vector-matrix product, treating the arguments as - stacks of column vectors and matrices, respectively. For complex - vectors, the conjugate is taken. 
- -These add to the existing `numpy.matmul` as well as to `numpy.vecdot`, -which was added in numpy 2.0. - -Note that `numpy.matmul` never takes a complex conjugate, also not -when its left input is a vector, while both `numpy.vecdot` and -`numpy.vecmat` do take the conjugate for complex vectors on the -left-hand side (which are taken to be the ones that are transposed, -following the physics convention). diff --git a/doc/release/upcoming_changes/26766.change.rst b/doc/release/upcoming_changes/26766.change.rst deleted file mode 100644 index f9223a1d1114..000000000000 --- a/doc/release/upcoming_changes/26766.change.rst +++ /dev/null @@ -1,2 +0,0 @@ -* `numpy.fix` now won't perform casting to a floating data-type for integer - and boolean data-type input arrays. diff --git a/doc/release/upcoming_changes/27088.change.rst b/doc/release/upcoming_changes/27088.change.rst deleted file mode 100644 index c9057ba53ea0..000000000000 --- a/doc/release/upcoming_changes/27088.change.rst +++ /dev/null @@ -1,2 +0,0 @@ -Bump the musllinux CI image and wheels to 1_2 from 1_1. This is because 1_1 is -`end of life `_. diff --git a/doc/release/upcoming_changes/27119.performance.rst b/doc/release/upcoming_changes/27119.performance.rst deleted file mode 100644 index abf7b58e4e8a..000000000000 --- a/doc/release/upcoming_changes/27119.performance.rst +++ /dev/null @@ -1,4 +0,0 @@ -* NumPy now uses fast-on-failure attribute lookups for protocols. - This can greatly reduce overheads of function calls or array creation - especially with custom Python objects. The largest improvements - will be seen on Python 3.12 or newer. diff --git a/doc/release/upcoming_changes/27147.performance.rst b/doc/release/upcoming_changes/27147.performance.rst deleted file mode 100644 index f2ec14212ef1..000000000000 --- a/doc/release/upcoming_changes/27147.performance.rst +++ /dev/null @@ -1,8 +0,0 @@ -* OpenBLAS on x86_64 and i686 is built with fewer kernels. 
Based on - benchmarking, there are 5 clusters of performance around these kernels: - ``PRESCOTT NEHALEM SANDYBRIDGE HASWELL SKYLAKEX``. - -* OpenBLAS on windows is linked without quadmath, simplifying licensing - -* Due to a regression in OpenBLAS on windows, the performance improvements - when using multiple threads for OpenBLAS 0.3.26 were reverted. diff --git a/doc/release/upcoming_changes/27156.change.rst b/doc/release/upcoming_changes/27156.change.rst deleted file mode 100644 index 5902b76d4332..000000000000 --- a/doc/release/upcoming_changes/27156.change.rst +++ /dev/null @@ -1,9 +0,0 @@ -NEP 50 promotion state option removed -------------------------------------- -The NEP 50 promotion state settings are now removed. They were always -meant as temporary means for testing. -A warning will be given if the environment variable is set to anything -but ``NPY_PROMOTION_STATE=weak`` while ``_set_promotion_state`` -and ``_get_promotion_state`` are removed. -In case code used ``_no_nep50_warning``, a ``contextlib.nullcontext`` -could be used to replace it when not available. diff --git a/doc/release/upcoming_changes/27160.expired.rst b/doc/release/upcoming_changes/27160.expired.rst deleted file mode 100644 index 9334aed2bad6..000000000000 --- a/doc/release/upcoming_changes/27160.expired.rst +++ /dev/null @@ -1,2 +0,0 @@ -* ``bool(np.array([]))`` and other empty arrays will now raise an error. - Use ``arr.size > 0`` instead to check whether an array has no elements. diff --git a/doc/release/upcoming_changes/27334.change.rst b/doc/release/upcoming_changes/27334.change.rst deleted file mode 100644 index e8d98ced1776..000000000000 --- a/doc/release/upcoming_changes/27334.change.rst +++ /dev/null @@ -1,9 +0,0 @@ -* The type annotations of ``numpy.float64`` and ``numpy.complex128`` now - reflect that they are also subtypes of the built-in ``float`` and ``complex`` - types, respectively. 
This update prevents static type-checkers from reporting - errors in cases such as: - - .. code-block:: python - - x: float = numpy.float64(6.28) # valid - z: complex = numpy.complex128(-1j) # valid diff --git a/doc/release/upcoming_changes/27420.new_feature.rst b/doc/release/upcoming_changes/27420.new_feature.rst deleted file mode 100644 index 7f6e223cda62..000000000000 --- a/doc/release/upcoming_changes/27420.new_feature.rst +++ /dev/null @@ -1,2 +0,0 @@ -* ``np.complexfloating[T, T]`` can now also be written as - ``np.complexfloating[T]`` diff --git a/doc/release/upcoming_changes/27482.change.rst b/doc/release/upcoming_changes/27482.change.rst deleted file mode 100644 index 3c974077e0d0..000000000000 --- a/doc/release/upcoming_changes/27482.change.rst +++ /dev/null @@ -1,8 +0,0 @@ -* The ``repr`` of arrays large enough to be summarized (i.e., where elements - are replaced with ``...``) now includes the ``shape`` of the array, similar - to what already was the case for arrays with zero size and non-obvious - shape. With this change, the shape is always given when it cannot be - inferred from the values. Note that while written as ``shape=...``, this - argument cannot actually be passed in to the ``np.array`` constructor. If - you encounter problems, e.g., due to failing doctests, you can use the print - option ``legacy=2.1`` to get the old behaviour. diff --git a/doc/release/upcoming_changes/27636.improvement.rst b/doc/release/upcoming_changes/27636.improvement.rst deleted file mode 100644 index 53c202b31197..000000000000 --- a/doc/release/upcoming_changes/27636.improvement.rst +++ /dev/null @@ -1,3 +0,0 @@ -* Fixed a number of issues around promotion for string ufuncs with StringDType - arguments. Mixing StringDType and the fixed-width DTypes using the string - ufuncs should now generate much more uniform results. 
diff --git a/doc/release/upcoming_changes/27661.compatibility.rst b/doc/release/upcoming_changes/27661.compatibility.rst deleted file mode 100644 index 0482f876766c..000000000000 --- a/doc/release/upcoming_changes/27661.compatibility.rst +++ /dev/null @@ -1,5 +0,0 @@ -* `numpy.cov` now properly transposes single-row (2d array) design matrices - when ``rowvar=False``. Previously, single-row design matrices would - return a scalar in this scenario, which is not correct, so this - is a behavior change and an array of the appropriate shape will - now be returned. diff --git a/doc/release/upcoming_changes/27695.improvement.rst b/doc/release/upcoming_changes/27695.improvement.rst deleted file mode 100644 index 95584b6e90ce..000000000000 --- a/doc/release/upcoming_changes/27695.improvement.rst +++ /dev/null @@ -1,5 +0,0 @@ -``f2py`` handles multiple modules and exposes variables again -------------------------------------------------------------- -A regression has been fixed which allows F2PY users to expose variables to -Python in modules with only assignments, and also fixes situations where -multiple modules are present within a single source file. diff --git a/doc/release/upcoming_changes/27723.improvement.rst b/doc/release/upcoming_changes/27723.improvement.rst deleted file mode 100644 index bffc9d5a17de..000000000000 --- a/doc/release/upcoming_changes/27723.improvement.rst +++ /dev/null @@ -1,4 +0,0 @@ -* Improved support for empty `memmap`. Previously an empty `memmap` would fail - unless a non-zero ``offset`` was set. Now a zero-size `memmap` is supported - even if ``offset=0``. To achieve this, if a `memmap` is mapped to an empty - file that file is padded with a single byte. 
diff --git a/doc/release/upcoming_changes/27735.deprecation.rst b/doc/release/upcoming_changes/27735.deprecation.rst deleted file mode 100644 index 897a3871264b..000000000000 --- a/doc/release/upcoming_changes/27735.deprecation.rst +++ /dev/null @@ -1,2 +0,0 @@ -* ``_add_newdoc_ufunc`` is now deprecated. ``ufunc.__doc__ = newdoc`` should - be used instead. diff --git a/doc/release/upcoming_changes/27735.new_feature.rst b/doc/release/upcoming_changes/27735.new_feature.rst deleted file mode 100644 index 4d216218399d..000000000000 --- a/doc/release/upcoming_changes/27735.new_feature.rst +++ /dev/null @@ -1,4 +0,0 @@ -* UFuncs now support ``__dict__`` attribute and allow overriding ``__doc__`` - (either directly or via ``ufunc.__dict__["__doc__"]``). ``__dict__`` can be - used to also override other properties, such as ``__module__`` or - ``__qualname__``. diff --git a/doc/release/upcoming_changes/27736.new_feature.rst b/doc/release/upcoming_changes/27736.new_feature.rst deleted file mode 100644 index 01422db19726..000000000000 --- a/doc/release/upcoming_changes/27736.new_feature.rst +++ /dev/null @@ -1,3 +0,0 @@ -* The "nbit" type parameter of ``np.number`` and its subtypes now defaults - to ``typing.Any``. This way, type-checkers will infer annotations such as - ``x: np.floating`` as ``x: np.floating[Any]``, even in strict mode. diff --git a/doc/release/upcoming_changes/27807.change.rst b/doc/release/upcoming_changes/27807.change.rst deleted file mode 100644 index 995c1770e224..000000000000 --- a/doc/release/upcoming_changes/27807.change.rst +++ /dev/null @@ -1,4 +0,0 @@ -* Calling ``__array_wrap__`` directly on NumPy arrays or scalars - now does the right thing when ``return_scalar`` is passed - (Added in NumPy 2). It is further safe now to call the scalar - ``__array_wrap__`` on a non-scalar result. 
diff --git a/doc/release/upcoming_changes/27808.performance.rst b/doc/release/upcoming_changes/27808.performance.rst deleted file mode 100644 index e3d5648d3d38..000000000000 --- a/doc/release/upcoming_changes/27808.performance.rst +++ /dev/null @@ -1,2 +0,0 @@ -* NumPy now indicates hugepages also for large ``np.zeros`` allocations - on linux. Thus should generally improve performance. diff --git a/doc/source/release/2.2.0-notes.rst b/doc/source/release/2.2.0-notes.rst index 125653352572..01dc98078487 100644 --- a/doc/source/release/2.2.0-notes.rst +++ b/doc/source/release/2.2.0-notes.rst @@ -4,16 +4,202 @@ NumPy 2.2.0 Release Notes ========================== +The NumPy 2.2.0 release is a quick release that brings us back into sync with the +usual twice yearly release cycle. There have been a number of small cleanups, +as well as work bringing the new StringDType to completion and improving support +for free threaded Python. Highlights are: -Highlights -========== +* New functions ``matvec`` and ``vecmat``, see below. +* Many improved annotations. +* Improved support for the new StringDType. +* Improved support for free threaded Python +* Fixes for f2py -*We'll choose highlights for this release near the end of the release cycle.* +This release supports Python versions 3.10-3.13. -.. if release snippets have been incorporated already, uncomment the follow - line (leave the `.. include:: directive) +Deprecations +============ -.. **Content from release note snippets in doc/release/upcoming_changes:** +* ``_add_newdoc_ufunc`` is now deprecated. ``ufunc.__doc__ = newdoc`` should + be used instead. + + (`gh-27735 `__) + + +Expired deprecations +==================== + +* ``bool(np.array([]))`` and other empty arrays will now raise an error. + Use ``arr.size > 0`` instead to check whether an array has no elements. 
+ + (`gh-27160 `__) + + +Compatibility notes +=================== + +* `numpy.cov` now properly transposes single-row (2d array) design matrices + when ``rowvar=False``. Previously, single-row design matrices would return a + scalar in this scenario, which is not correct, so this is a behavior change + and an array of the appropriate shape will now be returned. + + (`gh-27661 `__) + + +New Features +============ + +* New functions for matrix-vector and vector-matrix products + + Two new generalized ufuncs were defined: + + * `numpy.matvec` - matrix-vector product, treating the arguments as + stacks of matrices and column vectors, respectively. + + * `numpy.vecmat` - vector-matrix product, treating the arguments as + stacks of column vectors and matrices, respectively. For complex + vectors, the conjugate is taken. + + These add to the existing `numpy.matmul` as well as to `numpy.vecdot`, + which was added in numpy 2.0. + + Note that `numpy.matmul` never takes a complex conjugate, also not + when its left input is a vector, while both `numpy.vecdot` and + `numpy.vecmat` do take the conjugate for complex vectors on the + left-hand side (which are taken to be the ones that are transposed, + following the physics convention). + + (`gh-25675 `__) + +* ``np.complexfloating[T, T]`` can now also be written as + ``np.complexfloating[T]`` + + (`gh-27420 `__) + +* UFuncs now support ``__dict__`` attribute and allow overriding ``__doc__`` + (either directly or via ``ufunc.__dict__["__doc__"]``). ``__dict__`` can be + used to also override other properties, such as ``__module__`` or + ``__qualname__``. + + (`gh-27735 `__) + +* The "nbit" type parameter of ``np.number`` and its subtypes now defaults + to ``typing.Any``. This way, type-checkers will infer annotations such as + ``x: np.floating`` as ``x: np.floating[Any]``, even in strict mode. 
+ + (`gh-27736 `__) + + +Improvements +============ + +* The ``datetime64`` and ``timedelta64`` hashes now correctly match the Python's + builtin ``datetime`` and ``timedelta`` ones. The hashes now evaluate as equal + even for equal values with different time units. + + (`gh-14622 `__) + +* Fixed a number of issues around promotion for string ufuncs with StringDType + arguments. Mixing StringDType and the fixed-width DTypes using the string + ufuncs should now generate much more uniform results. + + (`gh-27636 `__) + +* Improved support for empty `memmap`. Previously an empty `memmap` would fail + unless a non-zero ``offset`` was set. Now a zero-size `memmap` is supported + even if ``offset=0``. To achieve this, if a `memmap` is mapped to an empty + file that file is padded with a single byte. + + (`gh-27723 `__) + +``f2py`` handles multiple modules and exposes variables again +------------------------------------------------------------- +A regression has been fixed which allows F2PY users to expose variables to +Python in modules with only assignments, and also fixes situations where +multiple modules are present within a single source file. + +(`gh-27695 `__) + + +Performance improvements and changes +==================================== + +* NumPy now uses fast-on-failure attribute lookups for protocols. This can + greatly reduce overheads of function calls or array creation especially with + custom Python objects. The largest improvements will be seen on Python 3.12 + or newer. + + (`gh-27119 `__) + +* OpenBLAS on x86_64 and i686 is built with fewer kernels. Based on + benchmarking, there are 5 clusters of performance around these kernels: + ``PRESCOTT NEHALEM SANDYBRIDGE HASWELL SKYLAKEX``. + +* OpenBLAS on windows is linked without quadmath, simplifying licensing + +* Due to a regression in OpenBLAS on windows, the performance improvements + when using multiple threads for OpenBLAS 0.3.26 were reverted. 
+ + (`gh-27147 `__) + +* NumPy now indicates hugepages also for large ``np.zeros`` allocations + on linux. This should generally improve performance. + + (`gh-27808 `__) + + +Changes +======= + +* `numpy.fix` now won't perform casting to a floating data-type for integer + and boolean data-type input arrays. + + (`gh-26766 `__) + +* The type annotations of ``numpy.float64`` and ``numpy.complex128`` now + reflect that they are also subtypes of the built-in ``float`` and ``complex`` + types, respectively. This update prevents static type-checkers from reporting + errors in cases such as: + + .. code-block:: python + + x: float = numpy.float64(6.28) # valid + z: complex = numpy.complex128(-1j) # valid + + (`gh-27334 `__) + +* The ``repr`` of arrays large enough to be summarized (i.e., where elements + are replaced with ``...``) now includes the ``shape`` of the array, similar + to what already was the case for arrays with zero size and non-obvious + shape. With this change, the shape is always given when it cannot be + inferred from the values. Note that while written as ``shape=...``, this + argument cannot actually be passed in to the ``np.array`` constructor. If + you encounter problems, e.g., due to failing doctests, you can use the print + option ``legacy=2.1`` to get the old behaviour. + + (`gh-27482 `__) + +* Calling ``__array_wrap__`` directly on NumPy arrays or scalars now does the + right thing when ``return_scalar`` is passed (Added in NumPy 2). It is + further safe now to call the scalar ``__array_wrap__`` on a non-scalar + result. + + (`gh-27807 `__) + +Bump the musllinux CI image and wheels to 1_2 from 1_1. This is because 1_1 is +`end of life `_. + +(`gh-27088 `__) + +NEP 50 promotion state option removed +------------------------------------- +The NEP 50 promotion state settings are now removed. They were always meant as +temporary means for testing. 
A warning will be given if the environment +variable is set to anything but ``NPY_PROMOTION_STATE=weak`` while +``_set_promotion_state`` and ``_get_promotion_state`` are removed. In case +code used ``_no_nep50_warning``, a ``contextlib.nullcontext`` could be used to +replace it when not available. + +(`gh-27156 `__) -.. include:: notes-towncrier.rst diff --git a/pyproject.toml b/pyproject.toml index 73e2021d9e95..6aece2d9798d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.2.0.dev0" +version = "2.2.0rc1" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"} diff --git a/tools/changelog.py b/tools/changelog.py index b065cda9f399..4498bb93bd9a 100755 --- a/tools/changelog.py +++ b/tools/changelog.py @@ -135,7 +135,7 @@ def main(token, revision_range): def backtick_repl(matchobj): """repl to add an escaped space following a code block if needed""" if matchobj.group(2) != ' ': - post = r'\ ' + matchobj.group(2) + post = r' ' + matchobj.group(2) else: post = matchobj.group(2) return '``' + matchobj.group(1) + '``' + post From 0be8037526c8d0a14903728e46c7e5f71835ea2f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 28 Nov 2024 12:04:35 +0100 Subject: [PATCH 011/187] BUG: fix importing numpy in Python's optimized mode (#27868) * TST: add PYTHONOPTIMIZE=2 to full CI (linux) * BUG: fix importing numpy in Python's optimized mode * TST: fix a test for compatibility with PYTHONOPTIMIZE --- .github/workflows/linux.yml | 2 ++ numpy/_core/overrides.py | 2 +- numpy/lib/tests/test_io.py | 6 ++++-- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 6ce78801a5e1..8b5461d945e9 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -152,6 +152,8 @@ jobs: run: | pytest numpy --cov-report=html:build/coverage # TODO: gcov + env: + 
PYTHONOPTIMIZE: 2 benchmark: needs: [smoke_test] diff --git a/numpy/_core/overrides.py b/numpy/_core/overrides.py index 41f42ab26fae..cb466408cd39 100644 --- a/numpy/_core/overrides.py +++ b/numpy/_core/overrides.py @@ -19,7 +19,7 @@ compatible with that passed in via this argument.""" ) -def get_array_function_like_doc(public_api, docstring_template=None): +def get_array_function_like_doc(public_api, docstring_template=""): ARRAY_FUNCTIONS.add(public_api) docstring = public_api.__doc__ or docstring_template return docstring.replace("${ARRAY_FUNCTION_LIKE}", array_function_like_doc) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 44aac93db1ff..742915e22ef0 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -2796,8 +2796,10 @@ def test_load_multiple_arrays_until_eof(): np.save(f, 1) np.save(f, 2) f.seek(0) - assert np.load(f) == 1 - assert np.load(f) == 2 + out1 = np.load(f) + assert out1 == 1 + out2 = np.load(f) + assert out2 == 2 with pytest.raises(EOFError): np.load(f) From 357e8d51cce0020f6c9dc3174e55d2ca4f07a10f Mon Sep 17 00:00:00 2001 From: "Stan U." <89152624+StanFromIreland@users.noreply.github.com> Date: Fri, 29 Nov 2024 19:44:26 +0000 Subject: [PATCH 012/187] DOC: Fix double import in docs (#27878) [skip cirrus] [skip azp] [skip actions] --- doc/source/reference/random/index.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/reference/random/index.rst b/doc/source/reference/random/index.rst index 976a03a9a449..77d39d0e771f 100644 --- a/doc/source/reference/random/index.rst +++ b/doc/source/reference/random/index.rst @@ -65,7 +65,6 @@ arbitrary 128-bit integer. 
>>> import numpy as np >>> import secrets - >>> import numpy as np >>> secrets.randbits(128) #doctest: +SKIP 122807528840384100672342137672332424406 # may vary >>> rng1 = np.random.default_rng(122807528840384100672342137672332424406) From 500c5bd9f680547bc367be39dd68b3372e911507 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Thu, 28 Nov 2024 14:02:52 +0100 Subject: [PATCH 013/187] MAINT: Ensure correct handling for very large unicode strings In the future, we can handle these strings (in parts we already can maybe), but for now have to stick to `int` length because more of the code needs cleanup to actually use it safely. (For user dtypes this is less of a problem, although corner cases probably exist.) This adds necessary checks to avoid large unicode dtypes. --- numpy/_core/src/multiarray/common.c | 13 ++- numpy/_core/src/multiarray/convert_datatype.c | 4 + numpy/_core/src/multiarray/descriptor.c | 25 ++++-- numpy/_core/src/multiarray/dtypemeta.c | 10 ++- numpy/_core/src/umath/string_ufuncs.cpp | 9 ++ numpy/_core/tests/test_strings.py | 84 ++++++++++++++++++- 6 files changed, 130 insertions(+), 15 deletions(-) diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c index 236ed11e058d..055bbfd58137 100644 --- a/numpy/_core/src/multiarray/common.c +++ b/numpy/_core/src/multiarray/common.c @@ -62,7 +62,7 @@ NPY_NO_EXPORT PyArray_Descr * PyArray_DTypeFromObjectStringDiscovery( PyObject *obj, PyArray_Descr *last_dtype, int string_type) { - int itemsize; + npy_intp itemsize; if (string_type == NPY_STRING) { PyObject *temp = PyObject_Str(obj); @@ -75,6 +75,12 @@ PyArray_DTypeFromObjectStringDiscovery( if (itemsize < 0) { return NULL; } + if (itemsize > NPY_MAX_INT) { + /* We can allow this, but should audit code paths before we do. 
*/ + PyErr_SetString(PyExc_TypeError, + "string too large to store inside array."); + return NULL; + } } else if (string_type == NPY_UNICODE) { PyObject *temp = PyObject_Str(obj); @@ -86,6 +92,11 @@ PyArray_DTypeFromObjectStringDiscovery( if (itemsize < 0) { return NULL; } + if (itemsize > NPY_MAX_INT / 4) { + PyErr_SetString(PyExc_TypeError, + "string too large to store inside array."); + return NULL; + } itemsize *= 4; /* convert UCS4 codepoints to bytes */ } else { diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 67f0a4d509fa..91174340c196 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -2403,6 +2403,10 @@ cast_to_string_resolve_descriptors( return -1; } if (dtypes[1]->type_num == NPY_UNICODE) { + if (size > NPY_MAX_INT / 4) { + PyErr_SetString(PyExc_TypeError, "Result string too large."); + return -1; + } size *= 4; } diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index 3ed3c36d4bba..006a5504f728 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -274,8 +274,16 @@ _convert_from_tuple(PyObject *obj, int align) if (PyDataType_ISUNSIZED(type)) { /* interpret next item as a typesize */ int itemsize = PyArray_PyIntAsInt(PyTuple_GET_ITEM(obj,1)); - - if (error_converting(itemsize)) { + if (type->type_num == NPY_UNICODE) { + if (itemsize > NPY_MAX_INT / 4) { + itemsize = -1; + } + else { + itemsize *= 4; + } + } + if (itemsize < 0) { + /* Error may or may not be set by PyIntAsInt. 
*/ PyErr_SetString(PyExc_ValueError, "invalid itemsize in generic type tuple"); Py_DECREF(type); @@ -285,12 +293,8 @@ _convert_from_tuple(PyObject *obj, int align) if (type == NULL) { return NULL; } - if (type->type_num == NPY_UNICODE) { - type->elsize = itemsize << 2; - } - else { - type->elsize = itemsize; - } + + type->elsize = itemsize; return type; } else if (type->metadata && (PyDict_Check(val) || PyDictProxy_Check(val))) { @@ -1861,7 +1865,10 @@ _convert_from_str(PyObject *obj, int align) */ case NPY_UNICODELTR: check_num = NPY_UNICODE; - elsize <<= 2; + if (elsize > (NPY_MAX_INT / 4)) { + goto fail; + } + elsize *= 4; break; case NPY_VOIDLTR: diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 8d75f991f112..a60e6fd59fd9 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -494,12 +494,14 @@ string_discover_descr_from_pyobject( itemsize = PyUnicode_GetLength(obj); } if (itemsize != -1) { - if (cls->type_num == NPY_UNICODE) { - itemsize *= 4; - } - if (itemsize > NPY_MAX_INT) { + if (itemsize > NPY_MAX_INT || ( + cls->type_num == NPY_UNICODE && itemsize > NPY_MAX_INT / 4)) { PyErr_SetString(PyExc_TypeError, "string too large to store inside array."); + return NULL; + } + if (cls->type_num == NPY_UNICODE) { + itemsize *= 4; } PyArray_Descr *res = PyArray_DescrNewFromType(cls->type_num); if (res == NULL) { diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index 2bc4ce20acd6..26fce9b61f54 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -643,6 +643,12 @@ string_addition_resolve_descriptors( PyArray_Descr *loop_descrs[3], npy_intp *NPY_UNUSED(view_offset)) { + /* NOTE: elsize is large enough now, but too much code still uses ints */ + if (given_descrs[0]->elsize + given_descrs[1]->elsize > NPY_MAX_INT) { + PyErr_SetString(PyExc_TypeError, "Result string too large."); + 
return _NPY_ERROR_OCCURRED_IN_CAST; + } + loop_descrs[0] = NPY_DT_CALL_ensure_canonical(given_descrs[0]); if (loop_descrs[0] == NULL) { return _NPY_ERROR_OCCURRED_IN_CAST; @@ -650,11 +656,14 @@ string_addition_resolve_descriptors( loop_descrs[1] = NPY_DT_CALL_ensure_canonical(given_descrs[1]); if (loop_descrs[1] == NULL) { + Py_DECREF(loop_descrs[0]); return _NPY_ERROR_OCCURRED_IN_CAST; } loop_descrs[2] = PyArray_DescrNew(loop_descrs[0]); if (loop_descrs[2] == NULL) { + Py_DECREF(loop_descrs[0]); + Py_DECREF(loop_descrs[1]); return _NPY_ERROR_OCCURRED_IN_CAST; } loop_descrs[2]->elsize += loop_descrs[1]->elsize; diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index a94b52939b1d..e1798de917ae 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -5,7 +5,7 @@ import numpy as np from numpy.testing import assert_array_equal, assert_raises, IS_PYPY - +from numpy.testing._private.utils import requires_memory COMPARISONS = [ (operator.eq, np.equal, "=="), @@ -109,6 +109,88 @@ def test_float_to_string_cast(str_dt, float_dt): assert_array_equal(res, np.array(expected, dtype=str_dt)) +@pytest.mark.parametrize("str_dt", "US") +@pytest.mark.parametrize("size", [-1, np.iinfo(np.intc).max]) +def test_string_size_dtype_errors(str_dt, size): + if size > 0: + size = size // np.dtype(f"{str_dt}1").itemsize + 1 + + with pytest.raises(ValueError): + np.dtype((str_dt, size)) + with pytest.raises(TypeError): + np.dtype(f"{str_dt}{size}") + + +@pytest.mark.parametrize("str_dt", "US") +def test_string_size_dtype_large_repr(str_dt): + size = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize + size_str = str(size) + + dtype = np.dtype((str_dt, size)) + assert size_str in dtype.str + assert size_str in str(dtype) + assert size_str in repr(dtype) + + +@pytest.mark.slow +@requires_memory(2 * np.iinfo(np.intc).max) +@pytest.mark.parametrize("str_dt", "US") +def test_large_string_coercion_error(str_dt): + very_large = 
np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize + try: + large_string = "A" * (very_large + 1) + except Exception: + # We may not be able to create this Python string on 32bit. + return + + class MyStr: + def __str__(self): + return large_string + + try: + # TypeError from NumPy, or OverflowError from 32bit Python. + with pytest.raises((TypeError, OverflowError)): + np.array([large_string], dtype=str_dt) + + # Same as above, but input has to be converted to a string. + with pytest.raises((TypeError, OverflowError)): + np.array([MyStr()], dtype=str_dt) + except MemoryError: + # Catch memory errors, because `requires_memory` would do so. + raise AssertionError("Ops should raise before any large allocation.") + +@pytest.mark.slow +@requires_memory(2 * np.iinfo(np.intc).max) +@pytest.mark.parametrize("str_dt", "US") +def test_large_string_addition_error(str_dt): + very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize + + a = np.array(["A" * very_large], dtype=str_dt) + b = np.array("B", dtype=str_dt) + try: + with pytest.raises(TypeError): + np.add(a, b) + with pytest.raises(TypeError): + np.add(a, a) + except MemoryError: + # Catch memory errors, because `requires_memory` would do so. + raise AssertionError("Ops should raise before any large allocation.") + + +def test_large_string_cast(): + very_large = np.iinfo(np.intc).max // 4 + # Could be nice to test very large path, but it makes too many huge + # allocations right now (need non-legacy cast loops for this). + # a = np.array([], dtype=np.dtype(("S", very_large))) + # assert a.astype("U").dtype.itemsize == very_large * 4 + + a = np.array([], dtype=np.dtype(("S", very_large + 1))) + # It is not perfect but OK if this raises a MemoryError during setup + # (this happens due clunky code and/or buffer setup.) 
+ with pytest.raises((TypeError, MemoryError)): + a.astype("U") + + @pytest.mark.parametrize("dt", ["S", "U", "T"]) class TestMethods: From 79baa2aabb26f5073bf3df0bfebb2826ecc4bb78 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 3 Dec 2024 13:36:53 +0100 Subject: [PATCH 014/187] TST: Use skipif in test to signal that the test did nothing --- numpy/_core/tests/test_strings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index e1798de917ae..9fe4c2693599 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -141,7 +141,7 @@ def test_large_string_coercion_error(str_dt): large_string = "A" * (very_large + 1) except Exception: # We may not be able to create this Python string on 32bit. - return + pytest.skip("python failed to create huge string") class MyStr: def __str__(self): From 361abb693b9b728f1abb6f00f9860f720347b4e4 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 4 Dec 2024 10:35:54 +0100 Subject: [PATCH 015/187] Add length information to exception Also add future proof guard, just in case we got a larger string in addition. --- numpy/_core/src/multiarray/common.c | 8 ++++---- numpy/_core/src/multiarray/convert_datatype.c | 3 ++- numpy/_core/src/umath/string_ufuncs.cpp | 14 +++++++++++--- 3 files changed, 17 insertions(+), 8 deletions(-) diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c index 055bbfd58137..8236ec5c65ae 100644 --- a/numpy/_core/src/multiarray/common.c +++ b/numpy/_core/src/multiarray/common.c @@ -77,8 +77,8 @@ PyArray_DTypeFromObjectStringDiscovery( } if (itemsize > NPY_MAX_INT) { /* We can allow this, but should audit code paths before we do. 
*/ - PyErr_SetString(PyExc_TypeError, - "string too large to store inside array."); + PyErr_Format(PyExc_TypeError, + "string of length %zd is too large to store inside array.", itemsize); return NULL; } } @@ -93,8 +93,8 @@ PyArray_DTypeFromObjectStringDiscovery( return NULL; } if (itemsize > NPY_MAX_INT / 4) { - PyErr_SetString(PyExc_TypeError, - "string too large to store inside array."); + PyErr_Format(PyExc_TypeError, + "string of length %zd is too large to store inside array.", itemsize); return NULL; } itemsize *= 4; /* convert UCS4 codepoints to bytes */ diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 91174340c196..1dff38a1d1ef 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -2404,7 +2404,8 @@ cast_to_string_resolve_descriptors( } if (dtypes[1]->type_num == NPY_UNICODE) { if (size > NPY_MAX_INT / 4) { - PyErr_SetString(PyExc_TypeError, "Result string too large."); + PyErr_Format(PyExc_TypeError, + "string of length %zd is too large to store inside array.", size); return -1; } size *= 4; diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index 26fce9b61f54..0e28240ee5f0 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -643,9 +643,17 @@ string_addition_resolve_descriptors( PyArray_Descr *loop_descrs[3], npy_intp *NPY_UNUSED(view_offset)) { - /* NOTE: elsize is large enough now, but too much code still uses ints */ - if (given_descrs[0]->elsize + given_descrs[1]->elsize > NPY_MAX_INT) { - PyErr_SetString(PyExc_TypeError, "Result string too large."); + npy_intp result_itemsize = given_descrs[0]->elsize + given_descrs[1]->elsize; + + /* NOTE: elsize can fit more than MAX_INT, but some code may still use ints */ + if (result_itemsize > NPY_MAX_INT || result_itemsize < 0) { + npy_intp length = result_itemsize; + if (given_descrs[0]->type == 
NPY_UNICODE) { + length /= 4; + } + PyErr_Format(PyExc_TypeError, + "addition result string of length %zd is too large to store inside array.", + length); return _NPY_ERROR_OCCURRED_IN_CAST; } From c2a5f7042aaacbe71c1e75adfecefaa6264bd6ab Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Mon, 25 Nov 2024 13:44:55 -0800 Subject: [PATCH 016/187] Use mask_store instead of store for compiler workaround gcc >= 12.x has a bug where using -O3 with -mavx512f generates an vextractf64x2 instruction which requires avx512dq. See https://gcc.godbolt.org/z/xT6osP173. This workaround prevents it from generating this optimization. --- numpy/_core/src/umath/loops_exponent_log.dispatch.c.src | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src b/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src index a4acc4437b1b..190ea6b8be72 100644 --- a/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src +++ b/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src @@ -1074,10 +1074,14 @@ AVX512F_log_DOUBLE(npy_double * op, _mm512_mask_storeu_pd(op, load_mask, res); } - /* call glibc's log func when x around 1.0f */ + /* call glibc's log func when x around 1.0f. */ if (glibc_mask != 0) { double NPY_DECL_ALIGNED(64) ip_fback[8]; - _mm512_store_pd(ip_fback, x_in); + /* Using a mask_store_pd instead of store_pd to prevent a fatal + * compiler optimization bug. See + * https://github.com/numpy/numpy/issues/27745#issuecomment-2498684564 + * for details.*/ + _mm512_mask_store_pd(ip_fback, avx512_get_full_load_mask_pd(), x_in); for (int ii = 0; ii < 8; ++ii, glibc_mask >>= 1) { if (glibc_mask & 0x01) { From b54ab62495e35a0ff3c65948b4ee6e3fd6d7fb9e Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 4 Dec 2024 15:50:59 -0700 Subject: [PATCH 017/187] MAINT: Update highway from main. 
--- numpy/_core/src/highway | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway index a97b5d371d69..68b0fdebffb1 160000 --- a/numpy/_core/src/highway +++ b/numpy/_core/src/highway @@ -1 +1 @@ -Subproject commit a97b5d371d696564e206627a883b1341c65bd983 +Subproject commit 68b0fdebffb14f3b8473fed1c33ce368efc431e7 From b459f58118cfaa11326e40319b37a49114ceae65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= <8431159+mtsokol@users.noreply.github.com> Date: Tue, 3 Dec 2024 15:54:06 +0100 Subject: [PATCH 018/187] Merge pull request #27891 from mtsokol/mtrand-dunder-module ENH: update `__module__` in `numpy.random` module --- numpy/random/_generator.pyx | 3 +++ numpy/random/mtrand.pyx | 11 +++++++++++ numpy/tests/test_public_api.py | 9 ++++++++- 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index ed44a82f25fe..ac2f64a0f81c 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -5082,3 +5082,6 @@ def default_rng(seed=None): # Otherwise we need to instantiate a new BitGenerator and Generator as # normal. 
return Generator(PCG64(seed)) + + +default_rng.__module__ = "numpy.random" diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index 7db3b15fb2fb..a4322bf348fb 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -4956,3 +4956,14 @@ __all__ = [ 'zipf', 'RandomState', ] + +seed.__module__ = "numpy.random" +ranf.__module__ = "numpy.random" +sample.__module__ = "numpy.random" +get_bit_generator.__module__ = "numpy.random" +set_bit_generator.__module__ = "numpy.random" + +for method_name in __all__[:-1]: + method = getattr(RandomState, method_name, None) + if method is not None: + method.__module__ = "numpy.random" diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index e5e6e4630633..33b91321d797 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -701,7 +701,7 @@ def test___module__attribute(): member_name not in [ "char", "core", "ctypeslib", "f2py", "ma", "lapack_lite", "mrecords", "testing", "tests", "polynomial", "typing", - "random", # cython disallows overriding __module__ + "mtrand", "bit_generator", ] and member not in visited_modules # not visited yet ): @@ -728,6 +728,13 @@ def test___module__attribute(): ): continue + # skip cdef classes + if member.__name__ in ( + "BitGenerator", "Generator", "MT19937", "PCG64", "PCG64DXSM", + "Philox", "RandomState", "SFC64", "SeedSequence", + ): + continue + incorrect_entries.append( dict( Func=member.__name__, From 77fff6b39403ac7242c8f5e916940fcf43af7cc9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Sok=C3=B3=C5=82?= <8431159+mtsokol@users.noreply.github.com> Date: Thu, 5 Dec 2024 12:25:47 +0100 Subject: [PATCH 019/187] ENH: Refactor ``__qualname__`` across API (#27877) * ENH: Refactor `__qualname__` across API * Apply review comments * Rehash `np.show_config` and add typing stubs * Revert typing change * Apply review comment --- numpy/__config__.py.in | 10 +++- numpy/__config__.pyi | 7 ++- numpy/__init__.py | 2 +- 
numpy/__init__.pyi | 2 + numpy/_core/multiarray.py | 1 + numpy/_core/strings.py | 1 + numpy/_core/tests/test_umath.py | 4 +- numpy/_typing/_ufunc.pyi | 10 ++++ numpy/ma/core.py | 6 ++- numpy/ma/extras.py | 1 + numpy/ma/timer_comparison.py | 10 ++-- numpy/random/mtrand.pyx | 2 + numpy/tests/test_public_api.py | 64 ++++++++++++++++++++++- numpy/typing/tests/data/reveal/ufuncs.pyi | 9 +++- 14 files changed, 115 insertions(+), 14 deletions(-) diff --git a/numpy/__config__.py.in b/numpy/__config__.py.in index 0040847708cc..a62f531c3769 100644 --- a/numpy/__config__.py.in +++ b/numpy/__config__.py.in @@ -7,7 +7,7 @@ from numpy._core._multiarray_umath import ( __cpu_dispatch__, ) -__all__ = ["show"] +__all__ = ["show_config"] _built_with_meson = True @@ -161,4 +161,10 @@ def show(mode=DisplayModes.stdout.value): f"Invalid `mode`, use one of: {', '.join([e.value for e in DisplayModes])}" ) -show.__module__ = "numpy" + +def show_config(mode=DisplayModes.stdout.value): + return show(mode) + + +show_config.__doc__ = show.__doc__ +show_config.__module__ = "numpy" diff --git a/numpy/__config__.pyi b/numpy/__config__.pyi index bfb13bae1cda..bd01228a1cc8 100644 --- a/numpy/__config__.pyi +++ b/numpy/__config__.pyi @@ -81,7 +81,7 @@ _ConfigDict = TypedDict( ### -__all__ = ["show"] +__all__ = ["show_config"] CONFIG: Final[_ConfigDict] = ... @@ -95,3 +95,8 @@ def _check_pyyaml() -> ModuleType: ... def show(mode: L["stdout"] = "stdout") -> None: ... @overload def show(mode: L["dicts"]) -> _ConfigDict: ... + +@overload +def show_config(mode: L["stdout"] = "stdout") -> None: ... +@overload +def show_config(mode: L["dicts"]) -> _ConfigDict: ... diff --git a/numpy/__init__.py b/numpy/__init__.py index 411db19fc98b..2a4fd03b6a44 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -111,7 +111,7 @@ from . 
import _distributor_init try: - from numpy.__config__ import show as show_config + from numpy.__config__ import show_config except ImportError as e: msg = """Error importing numpy: you should not try to import numpy from its source directory; please exit the numpy source tree, and relaunch diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 9d3492eac003..a0287a3f6e96 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4386,6 +4386,8 @@ class ufunc: @property def __name__(self) -> LiteralString: ... @property + def __qualname__(self) -> LiteralString: ... + @property def __doc__(self) -> str: ... @property def nin(self) -> int: ... diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index b50f319ecf0b..088de1073e7e 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -91,6 +91,7 @@ def _override___module__(): ]: ufunc = namespace_names[ufunc_name] ufunc.__module__ = "numpy" + ufunc.__qualname__ = ufunc_name _override___module__() diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index 87ab150adc31..b751b5d773a0 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -55,6 +55,7 @@ def _override___module__(): istitle, isupper, str_len, ]: ufunc.__module__ = "numpy.strings" + ufunc.__qualname__ = ufunc.__name__ _override___module__() diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index cef0348c2dac..4d56c785d5a7 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -4019,7 +4019,9 @@ def test_array_ufunc_direct_call(self): def test_ufunc_docstring(self): original_doc = np.add.__doc__ new_doc = "new docs" - expected_dict = {} if IS_PYPY else {"__module__": "numpy"} + expected_dict = ( + {} if IS_PYPY else {"__module__": "numpy", "__qualname__": "add"} + ) np.add.__doc__ = new_doc assert np.add.__doc__ == new_doc diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index 64c1d4647b7f..997d297f65d9 100644 --- 
a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -77,6 +77,8 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @property def __name__(self) -> _NameType: ... @property + def __qualname__(self) -> _NameType: ... + @property def ntypes(self) -> _NTypes: ... @property def identity(self) -> _IDType: ... @@ -146,6 +148,8 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @property def __name__(self) -> _NameType: ... @property + def __qualname__(self) -> _NameType: ... + @property def ntypes(self) -> _NTypes: ... @property def identity(self) -> _IDType: ... @@ -258,6 +262,8 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @property def __name__(self) -> _NameType: ... @property + def __qualname__(self) -> _NameType: ... + @property def ntypes(self) -> _NTypes: ... @property def identity(self) -> _IDType: ... @@ -326,6 +332,8 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @property def __name__(self) -> _NameType: ... @property + def __qualname__(self) -> _NameType: ... + @property def ntypes(self) -> _NTypes: ... @property def identity(self) -> _IDType: ... @@ -381,6 +389,8 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature] @property def __name__(self) -> _NameType: ... @property + def __qualname__(self) -> _NameType: ... + @property def ntypes(self) -> _NTypes: ... @property def identity(self) -> _IDType: ... 
diff --git a/numpy/ma/core.py b/numpy/ma/core.py index b76d090add03..97d6c9eafa5a 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -21,12 +21,12 @@ """ # pylint: disable-msg=E1002 import builtins +import functools import inspect import operator import warnings import textwrap import re -from functools import reduce from typing import Dict import numpy as np @@ -939,6 +939,7 @@ def __init__(self, ufunc): self.f = ufunc self.__doc__ = ufunc.__doc__ self.__name__ = ufunc.__name__ + self.__qualname__ = ufunc.__qualname__ def __str__(self): return f"Masked version of {self.f}" @@ -3157,7 +3158,7 @@ def __array_wrap__(self, obj, context=None, return_scalar=False): func, args, out_i = context # args sometimes contains outputs (gh-10459), which we don't want input_args = args[:func.nin] - m = reduce(mask_or, [getmaskarray(arg) for arg in input_args]) + m = functools.reduce(mask_or, [getmaskarray(arg) for arg in input_args]) # Get the domain mask domain = ufunc_domain.get(func) if domain is not None: @@ -7099,6 +7100,7 @@ class _frommethod: def __init__(self, methodname, reversed=False): self.__name__ = methodname + self.__qualname__ = methodname self.__doc__ = self.getdoc() self.reversed = reversed diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index d9d8e124d31d..bdc35c424ce3 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -249,6 +249,7 @@ class _fromnxfunction: def __init__(self, funcname): self.__name__ = funcname + self.__qualname__ = funcname self.__doc__ = self.getdoc() def getdoc(self): diff --git a/numpy/ma/timer_comparison.py b/numpy/ma/timer_comparison.py index 9ae4c63c8e9a..9c157308fcbd 100644 --- a/numpy/ma/timer_comparison.py +++ b/numpy/ma/timer_comparison.py @@ -1,5 +1,5 @@ +import functools import timeit -from functools import reduce import numpy as np import numpy._core.fromnumeric as fromnumeric @@ -133,10 +133,10 @@ def test_1(self): xf = np.where(m1, 1.e+20, x) xm.set_fill_value(1.e+20) - assert((xm-ym).filled(0).any()) + assert 
(xm-ym).filled(0).any() s = x.shape - assert(xm.size == reduce(lambda x, y:x*y, s)) - assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) + assert xm.size == functools.reduce(lambda x, y: x*y, s) + assert self.count(xm) == len(m1) - functools.reduce(lambda x, y: x+y, m1) for s in [(4, 3), (6, 2)]: x.shape = s @@ -144,7 +144,7 @@ def test_1(self): xm.shape = s ym.shape = s xf.shape = s - assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1)) + assert self.count(xm) == len(m1) - functools.reduce(lambda x, y: x+y, m1) @np.errstate(all='ignore') def test_2(self): diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index a4322bf348fb..49c0257167ad 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -4967,3 +4967,5 @@ for method_name in __all__[:-1]: method = getattr(RandomState, method_name, None) if method is not None: method.__module__ = "numpy.random" + +del method, method_name diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index 33b91321d797..b25818c62d31 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -1,3 +1,4 @@ +import functools import sys import sysconfig import subprocess @@ -681,7 +682,7 @@ def test_functions_single_location(): assert len(duplicated_functions) == 0, duplicated_functions -def test___module__attribute(): +def test___module___attribute(): modules_queue = [np] visited_modules = {np} visited_functions = set() @@ -746,3 +747,64 @@ def test___module__attribute(): if incorrect_entries: assert len(incorrect_entries) == 0, incorrect_entries + + +def _check___qualname__(obj) -> bool: + qualname = obj.__qualname__ + name = obj.__name__ + module_name = obj.__module__ + assert name == qualname.split(".")[-1] + + module = sys.modules[module_name] + actual_obj = functools.reduce(getattr, qualname.split("."), module) + return ( + actual_obj is obj or + ( + # for bound methods check qualname match + module_name.startswith("numpy.random") and + 
actual_obj.__qualname__ == qualname + ) + ) + + +def test___qualname___attribute(): + modules_queue = [np] + visited_modules = {np} + visited_functions = set() + incorrect_entries = [] + + while len(modules_queue) > 0: + module = modules_queue.pop() + for member_name in dir(module): + member = getattr(module, member_name) + # first check if we got a module + if ( + inspect.ismodule(member) and # it's a module + "numpy" in member.__name__ and # inside NumPy + not member_name.startswith("_") and # not private + member_name not in [ + "f2py", "ma", "tests", "testing", "typing", + "bit_generator", "ctypeslib", "lapack_lite", + ] and # skip modules + "numpy._core" not in member.__name__ and # outside _core + member not in visited_modules # not visited yet + ): + modules_queue.append(member) + visited_modules.add(member) + elif ( + not inspect.ismodule(member) and + hasattr(member, "__name__") and + not member.__name__.startswith("_") and + not member_name.startswith("_") and + not _check___qualname__(member) and + member not in visited_functions + ): + incorrect_entries.append( + dict( + actual=member.__qualname__, expected=member.__name__, + ) + ) + visited_functions.add(member) + + if incorrect_entries: + assert len(incorrect_entries) == 0, incorrect_entries diff --git a/numpy/typing/tests/data/reveal/ufuncs.pyi b/numpy/typing/tests/data/reveal/ufuncs.pyi index fc2345289236..8d3527ac8415 100644 --- a/numpy/typing/tests/data/reveal/ufuncs.pyi +++ b/numpy/typing/tests/data/reveal/ufuncs.pyi @@ -14,6 +14,7 @@ assert_type(np.absolute.__doc__, str) assert_type(np.absolute.types, list[str]) assert_type(np.absolute.__name__, Literal["absolute"]) +assert_type(np.absolute.__qualname__, Literal["absolute"]) assert_type(np.absolute.ntypes, Literal[20]) assert_type(np.absolute.identity, None) assert_type(np.absolute.nin, Literal[1]) @@ -26,6 +27,7 @@ assert_type(np.absolute(AR_f8), npt.NDArray[Any]) assert_type(np.absolute.at(AR_f8, AR_i8), None) assert_type(np.add.__name__, 
Literal["add"]) +assert_type(np.add.__qualname__, Literal["add"]) assert_type(np.add.ntypes, Literal[22]) assert_type(np.add.identity, Literal[0]) assert_type(np.add.nin, Literal[2]) @@ -42,6 +44,7 @@ assert_type(np.add.outer(f8, f8), Any) assert_type(np.add.outer(AR_f8, f8), npt.NDArray[Any]) assert_type(np.frexp.__name__, Literal["frexp"]) +assert_type(np.frexp.__qualname__, Literal["frexp"]) assert_type(np.frexp.ntypes, Literal[4]) assert_type(np.frexp.identity, None) assert_type(np.frexp.nin, Literal[1]) @@ -52,6 +55,7 @@ assert_type(np.frexp(f8), tuple[Any, Any]) assert_type(np.frexp(AR_f8), tuple[npt.NDArray[Any], npt.NDArray[Any]]) assert_type(np.divmod.__name__, Literal["divmod"]) +assert_type(np.divmod.__qualname__, Literal["divmod"]) assert_type(np.divmod.ntypes, Literal[15]) assert_type(np.divmod.identity, None) assert_type(np.divmod.nin, Literal[2]) @@ -62,6 +66,7 @@ assert_type(np.divmod(f8, f8), tuple[Any, Any]) assert_type(np.divmod(AR_f8, f8), tuple[npt.NDArray[Any], npt.NDArray[Any]]) assert_type(np.matmul.__name__, Literal["matmul"]) +assert_type(np.matmul.__qualname__, Literal["matmul"]) assert_type(np.matmul.ntypes, Literal[19]) assert_type(np.matmul.identity, None) assert_type(np.matmul.nin, Literal[2]) @@ -73,6 +78,7 @@ assert_type(np.matmul(AR_f8, AR_f8), Any) assert_type(np.matmul(AR_f8, AR_f8, axes=[(0, 1), (0, 1), (0, 1)]), Any) assert_type(np.vecdot.__name__, Literal["vecdot"]) +assert_type(np.vecdot.__qualname__, Literal["vecdot"]) assert_type(np.vecdot.ntypes, Literal[19]) assert_type(np.vecdot.identity, None) assert_type(np.vecdot.nin, Literal[2]) @@ -82,7 +88,8 @@ assert_type(np.vecdot.signature, Literal["(n),(n)->()"]) assert_type(np.vecdot.identity, None) assert_type(np.vecdot(AR_f8, AR_f8), Any) -assert_type(np.bitwise_count.__name__, Literal['bitwise_count']) +assert_type(np.bitwise_count.__name__, Literal["bitwise_count"]) +assert_type(np.bitwise_count.__qualname__, Literal["bitwise_count"]) assert_type(np.bitwise_count.ntypes, 
Literal[11]) assert_type(np.bitwise_count.identity, None) assert_type(np.bitwise_count.nin, Literal[1]) From ee8d1cd01542836229ff1366c583e13db2b8d874 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 5 Dec 2024 06:13:31 -0700 Subject: [PATCH 020/187] PERF: improve multithreaded ufunc scaling (#27896) * PERF: add a fast path to ufunc type resolution * MAINT: move dispatching.c to C++ * MAINT: move npy_hashtable to C++ and use std::shared_mutex * MAINT: fix windows linking * MAINT: remove outdated comment * MAINT: only call try_promote on free-threaded build Converts dispatching to cpp in order to use `std::shared_mutex` to improve free-threaded scaling. * MAINT: try to give new function a name indicating it uses a mutex * MAINT: only do complicated casting to get a mutex pointer once * MAINT: use std::nothrow to avoid dealing with exceptions * DOC: add changelog --- .../upcoming_changes/27896.performance.rst | 2 + numpy/_core/code_generators/genapi.py | 2 +- numpy/_core/code_generators/generate_umath.py | 5 +- numpy/_core/include/numpy/ndarraytypes.h | 8 ++ numpy/_core/meson.build | 6 +- .../{npy_hashtable.c => npy_hashtable.cpp} | 21 ++++- numpy/_core/src/common/npy_hashtable.h | 11 +++ numpy/_core/src/multiarray/common.h | 20 +++-- numpy/_core/src/multiarray/npy_static_data.h | 8 ++ .../umath/{dispatching.c => dispatching.cpp} | 83 +++++++++++++++---- numpy/_core/src/umath/dispatching.h | 4 + numpy/_core/src/umath/ufunc_object.h | 7 ++ numpy/_core/src/umath/ufunc_type_resolution.h | 8 ++ 13 files changed, 152 insertions(+), 33 deletions(-) create mode 100644 doc/release/upcoming_changes/27896.performance.rst rename numpy/_core/src/common/{npy_hashtable.c => npy_hashtable.cpp} (92%) rename numpy/_core/src/umath/{dispatching.c => dispatching.cpp} (94%) diff --git a/doc/release/upcoming_changes/27896.performance.rst b/doc/release/upcoming_changes/27896.performance.rst new file mode 100644 index 000000000000..cf79dd19f558 --- /dev/null +++ 
b/doc/release/upcoming_changes/27896.performance.rst @@ -0,0 +1,2 @@ +* Improved multithreaded scaling on the free-threaded build when many threads + simultaneously call the same ufunc operations. diff --git a/numpy/_core/code_generators/genapi.py b/numpy/_core/code_generators/genapi.py index da2f8f636e59..3eb03b208ab6 100644 --- a/numpy/_core/code_generators/genapi.py +++ b/numpy/_core/code_generators/genapi.py @@ -85,7 +85,7 @@ def get_processor(): join('multiarray', 'stringdtype', 'static_string.c'), join('multiarray', 'strfuncs.c'), join('multiarray', 'usertypes.c'), - join('umath', 'dispatching.c'), + join('umath', 'dispatching.cpp'), join('umath', 'extobj.c'), join('umath', 'loops.c.src'), join('umath', 'reduction.c'), diff --git a/numpy/_core/code_generators/generate_umath.py b/numpy/_core/code_generators/generate_umath.py index 1ff1fd019936..c810de1aec5f 100644 --- a/numpy/_core/code_generators/generate_umath.py +++ b/numpy/_core/code_generators/generate_umath.py @@ -1592,13 +1592,10 @@ def make_code(funcdict, filename): #include "matmul.h" #include "clip.h" #include "dtypemeta.h" + #include "dispatching.h" #include "_umath_doc_generated.h" %s - /* Returns a borrowed ref of the second value in the matching info tuple */ - PyObject * - get_info_no_cast(PyUFuncObject *ufunc, PyArray_DTypeMeta *op_dtype, - int ndtypes); static int InitOperators(PyObject *dictionary) { diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h index ecbe3b49b229..7d1fa2f0e000 100644 --- a/numpy/_core/include/numpy/ndarraytypes.h +++ b/numpy/_core/include/numpy/ndarraytypes.h @@ -1,6 +1,10 @@ #ifndef NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ #define NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ +#ifdef __cplusplus +extern "C" { +#endif + #include "npy_common.h" #include "npy_endian.h" #include "npy_cpu.h" @@ -1922,4 +1926,8 @@ typedef struct { */ #undef NPY_DEPRECATED_INCLUDES +#ifdef __cplusplus +} +#endif + #endif /* 
NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ */ diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 979ceb2cfcfe..d32d71adc5dd 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -713,7 +713,7 @@ py.extension_module('_multiarray_tests', src_file.process('src/multiarray/_multiarray_tests.c.src'), 'src/common/mem_overlap.c', 'src/common/npy_argparse.c', - 'src/common/npy_hashtable.c', + 'src/common/npy_hashtable.cpp', src_file.process('src/common/templ_common.h.src') ], c_args: c_args_common, @@ -1042,7 +1042,7 @@ src_multiarray_umath_common = [ 'src/common/gil_utils.c', 'src/common/mem_overlap.c', 'src/common/npy_argparse.c', - 'src/common/npy_hashtable.c', + 'src/common/npy_hashtable.cpp', 'src/common/npy_import.c', 'src/common/npy_longdouble.c', 'src/common/ucsnarrow.c', @@ -1153,7 +1153,7 @@ src_umath = umath_gen_headers + [ 'src/umath/ufunc_type_resolution.c', 'src/umath/clip.cpp', 'src/umath/clip.h', - 'src/umath/dispatching.c', + 'src/umath/dispatching.cpp', 'src/umath/extobj.c', 'src/umath/legacy_array_method.c', 'src/umath/override.c', diff --git a/numpy/_core/src/common/npy_hashtable.c b/numpy/_core/src/common/npy_hashtable.cpp similarity index 92% rename from numpy/_core/src/common/npy_hashtable.c rename to numpy/_core/src/common/npy_hashtable.cpp index 596e62cf8354..de3194ac05d2 100644 --- a/numpy/_core/src/common/npy_hashtable.c +++ b/numpy/_core/src/common/npy_hashtable.cpp @@ -12,6 +12,9 @@ * case is likely desired. 
*/ +#include +#include + #include "templ_common.h" #include "npy_hashtable.h" @@ -89,7 +92,7 @@ find_item(PyArrayIdentityHash const *tb, PyObject *const *key) NPY_NO_EXPORT PyArrayIdentityHash * PyArrayIdentityHash_New(int key_len) { - PyArrayIdentityHash *res = PyMem_Malloc(sizeof(PyArrayIdentityHash)); + PyArrayIdentityHash *res = (PyArrayIdentityHash *)PyMem_Malloc(sizeof(PyArrayIdentityHash)); if (res == NULL) { PyErr_NoMemory(); return NULL; @@ -100,12 +103,21 @@ PyArrayIdentityHash_New(int key_len) res->size = 4; /* Start with a size of 4 */ res->nelem = 0; - res->buckets = PyMem_Calloc(4 * (key_len + 1), sizeof(PyObject *)); + res->buckets = (PyObject **)PyMem_Calloc(4 * (key_len + 1), sizeof(PyObject *)); if (res->buckets == NULL) { PyErr_NoMemory(); PyMem_Free(res); return NULL; } + +#ifdef Py_GIL_DISABLED + res->mutex = new(std::nothrow) std::shared_mutex(); + if (res->mutex == nullptr) { + PyErr_NoMemory(); + PyMem_Free(res); + return NULL; + } +#endif return res; } @@ -115,6 +127,9 @@ PyArrayIdentityHash_Dealloc(PyArrayIdentityHash *tb) { PyMem_Free(tb->buckets); PyMem_Free(tb); +#ifdef Py_GIL_DISABLED + delete (std::shared_mutex *)tb->mutex; +#endif } @@ -149,7 +164,7 @@ _resize_if_necessary(PyArrayIdentityHash *tb) if (npy_mul_sizes_with_overflow(&alloc_size, new_size, tb->key_len + 1)) { return -1; } - tb->buckets = PyMem_Calloc(alloc_size, sizeof(PyObject *)); + tb->buckets = (PyObject **)PyMem_Calloc(alloc_size, sizeof(PyObject *)); if (tb->buckets == NULL) { tb->buckets = old_table; PyErr_NoMemory(); diff --git a/numpy/_core/src/common/npy_hashtable.h b/numpy/_core/src/common/npy_hashtable.h index a4252da87aff..cd061ba6fa11 100644 --- a/numpy/_core/src/common/npy_hashtable.h +++ b/numpy/_core/src/common/npy_hashtable.h @@ -7,12 +7,19 @@ #include "numpy/ndarraytypes.h" +#ifdef __cplusplus +extern "C" { +#endif + typedef struct { int key_len; /* number of identities used */ /* Buckets stores: val1, key1[0], key1[1], ..., val2, key2[0], ... 
*/ PyObject **buckets; npy_intp size; /* current size */ npy_intp nelem; /* number of elements */ +#ifdef Py_GIL_DISABLED + void *mutex; +#endif } PyArrayIdentityHash; @@ -29,4 +36,8 @@ PyArrayIdentityHash_New(int key_len); NPY_NO_EXPORT void PyArrayIdentityHash_Dealloc(PyArrayIdentityHash *tb); +#ifdef __cplusplus +} +#endif + #endif /* NUMPY_CORE_SRC_COMMON_NPY_NPY_HASHTABLE_H_ */ diff --git a/numpy/_core/src/multiarray/common.h b/numpy/_core/src/multiarray/common.h index 6086f4d2c554..f4ba10d42e18 100644 --- a/numpy/_core/src/multiarray/common.h +++ b/numpy/_core/src/multiarray/common.h @@ -12,6 +12,10 @@ #include "npy_import.h" #include +#ifdef __cplusplus +extern "C" { +#endif + #define error_converting(x) (((x) == -1) && PyErr_Occurred()) #ifdef NPY_ALLOW_THREADS @@ -104,13 +108,13 @@ check_and_adjust_index(npy_intp *index, npy_intp max_item, int axis, /* Try to be as clear as possible about what went wrong. */ if (axis >= 0) { PyErr_Format(PyExc_IndexError, - "index %"NPY_INTP_FMT" is out of bounds " - "for axis %d with size %"NPY_INTP_FMT, + "index %" NPY_INTP_FMT" is out of bounds " + "for axis %d with size %" NPY_INTP_FMT, *index, axis, max_item); } else { PyErr_Format(PyExc_IndexError, - "index %"NPY_INTP_FMT" is out of bounds " - "for size %"NPY_INTP_FMT, *index, max_item); + "index %" NPY_INTP_FMT " is out of bounds " + "for size %" NPY_INTP_FMT, *index, max_item); } return -1; } @@ -163,7 +167,9 @@ check_and_adjust_axis(int *axis, int ndim) * . * clang versions < 8.0.0 have the same bug. 
*/ -#if (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112 \ +#ifdef __cplusplus +#define NPY_ALIGNOF(type) alignof(type) +#elif (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112 \ || (defined __GNUC__ && __GNUC__ < 4 + (__GNUC_MINOR__ < 9) \ && !defined __clang__) \ || (defined __clang__ && __clang_major__ < 8)) @@ -347,4 +353,8 @@ new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out, */ #define NPY_ITER_REDUCTION_AXIS(axis) (axis + (1 << (NPY_BITSOF_INT - 2))) +#ifdef __cplusplus +} +#endif + #endif /* NUMPY_CORE_SRC_MULTIARRAY_COMMON_H_ */ diff --git a/numpy/_core/src/multiarray/npy_static_data.h b/numpy/_core/src/multiarray/npy_static_data.h index 45e3fa0e151a..d6ee4a8dc54d 100644 --- a/numpy/_core/src/multiarray/npy_static_data.h +++ b/numpy/_core/src/multiarray/npy_static_data.h @@ -1,6 +1,10 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_STATIC_DATA_H_ #define NUMPY_CORE_SRC_MULTIARRAY_STATIC_DATA_H_ +#ifdef __cplusplus +extern "C" { +#endif + NPY_NO_EXPORT int initialize_static_globals(void); @@ -168,4 +172,8 @@ NPY_VISIBILITY_HIDDEN extern npy_interned_str_struct npy_interned_str; NPY_VISIBILITY_HIDDEN extern npy_static_pydata_struct npy_static_pydata; NPY_VISIBILITY_HIDDEN extern npy_static_cdata_struct npy_static_cdata; +#ifdef __cplusplus +} +#endif + #endif // NUMPY_CORE_SRC_MULTIARRAY_STATIC_DATA_H_ diff --git a/numpy/_core/src/umath/dispatching.c b/numpy/_core/src/umath/dispatching.cpp similarity index 94% rename from numpy/_core/src/umath/dispatching.c rename to numpy/_core/src/umath/dispatching.cpp index 9e465dbe72a5..87b16cc176b8 100644 --- a/numpy/_core/src/umath/dispatching.c +++ b/numpy/_core/src/umath/dispatching.cpp @@ -38,6 +38,9 @@ #define _MULTIARRAYMODULE #define _UMATHMODULE +#include +#include + #define PY_SSIZE_T_CLEAN #include #include @@ -504,8 +507,9 @@ call_promoter_and_recurse(PyUFuncObject *ufunc, PyObject *info, PyObject *promoter = PyTuple_GET_ITEM(info, 1); if (PyCapsule_CheckExact(promoter)) { /* We 
could also go the other way and wrap up the python function... */ - PyArrayMethod_PromoterFunction *promoter_function = PyCapsule_GetPointer( - promoter, "numpy._ufunc_promoter"); + PyArrayMethod_PromoterFunction *promoter_function = + (PyArrayMethod_PromoterFunction *)PyCapsule_GetPointer( + promoter, "numpy._ufunc_promoter"); if (promoter_function == NULL) { return NULL; } @@ -770,8 +774,9 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, * 2. Check all registered loops/promoters to find the best match. * 3. Fall back to the legacy implementation if no match was found. */ - PyObject *info = PyArrayIdentityHash_GetItem(ufunc->_dispatch_cache, - (PyObject **)op_dtypes); + PyObject *info = PyArrayIdentityHash_GetItem( + (PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes); if (info != NULL && PyObject_TypeCheck( PyTuple_GET_ITEM(info, 1), &PyArrayMethod_Type)) { /* Found the ArrayMethod and NOT a promoter: return it */ @@ -793,8 +798,9 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, * Found the ArrayMethod and NOT promoter. Before returning it * add it to the cache for faster lookup in the future. 
*/ - if (PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache, - (PyObject **)op_dtypes, info, 0) < 0) { + if (PyArrayIdentityHash_SetItem( + (PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes, info, 0) < 0) { return NULL; } return info; @@ -815,8 +821,9 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, } else if (info != NULL) { /* Add result to the cache using the original types: */ - if (PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache, - (PyObject **)op_dtypes, info, 0) < 0) { + if (PyArrayIdentityHash_SetItem( + (PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes, info, 0) < 0) { return NULL; } return info; @@ -882,13 +889,51 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, } /* Add this to the cache using the original types: */ - if (cacheable && PyArrayIdentityHash_SetItem(ufunc->_dispatch_cache, - (PyObject **)op_dtypes, info, 0) < 0) { + if (cacheable && PyArrayIdentityHash_SetItem( + (PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes, info, 0) < 0) { return NULL; } return info; } +#ifdef Py_GIL_DISABLED +/* + * Fast path for promote_and_get_info_and_ufuncimpl. 
+ * Acquires a read lock to check for a cache hit and then + * only acquires a write lock on a cache miss to fill the cache + */ +static inline PyObject * +promote_and_get_info_and_ufuncimpl_with_locking( + PyUFuncObject *ufunc, + PyArrayObject *const ops[], + PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *op_dtypes[], + npy_bool legacy_promotion_is_possible) +{ + std::shared_mutex *mutex = ((std::shared_mutex *)((PyArrayIdentityHash *)ufunc->_dispatch_cache)->mutex); + mutex->lock_shared(); + PyObject *info = PyArrayIdentityHash_GetItem( + (PyArrayIdentityHash *)ufunc->_dispatch_cache, + (PyObject **)op_dtypes); + mutex->unlock_shared(); + + if (info != NULL && PyObject_TypeCheck( + PyTuple_GET_ITEM(info, 1), &PyArrayMethod_Type)) { + /* Found the ArrayMethod and NOT a promoter: return it */ + return info; + } + + // cache miss, need to acquire a write lock and recursively calculate the + // correct dispatch resolution + mutex->lock(); + info = promote_and_get_info_and_ufuncimpl(ufunc, + ops, signature, op_dtypes, legacy_promotion_is_possible); + mutex->unlock(); + + return info; +} +#endif /** * The central entry-point for the promotion and dispatching machinery. 
@@ -941,6 +986,8 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, { int nin = ufunc->nin, nargs = ufunc->nargs; npy_bool legacy_promotion_is_possible = NPY_TRUE; + PyObject *all_dtypes = NULL; + PyArrayMethodObject *method = NULL; /* * Get the actual DTypes we operate with by setting op_dtypes[i] from @@ -976,18 +1023,20 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, } } - PyObject *info; - Py_BEGIN_CRITICAL_SECTION((PyObject *)ufunc); - info = promote_and_get_info_and_ufuncimpl(ufunc, +#ifdef Py_GIL_DISABLED + PyObject *info = promote_and_get_info_and_ufuncimpl_with_locking(ufunc, + ops, signature, op_dtypes, legacy_promotion_is_possible); +#else + PyObject *info = promote_and_get_info_and_ufuncimpl(ufunc, ops, signature, op_dtypes, legacy_promotion_is_possible); - Py_END_CRITICAL_SECTION(); +#endif if (info == NULL) { goto handle_error; } - PyArrayMethodObject *method = (PyArrayMethodObject *)PyTuple_GET_ITEM(info, 1); - PyObject *all_dtypes = PyTuple_GET_ITEM(info, 0); + method = (PyArrayMethodObject *)PyTuple_GET_ITEM(info, 1); + all_dtypes = PyTuple_GET_ITEM(info, 0); /* * In certain cases (only the logical ufuncs really), the loop we found may @@ -1218,7 +1267,7 @@ install_logical_ufunc_promoter(PyObject *ufunc) if (dtype_tuple == NULL) { return -1; } - PyObject *promoter = PyCapsule_New(&logical_ufunc_promoter, + PyObject *promoter = PyCapsule_New((void *)&logical_ufunc_promoter, "numpy._ufunc_promoter", NULL); if (promoter == NULL) { Py_DECREF(dtype_tuple); diff --git a/numpy/_core/src/umath/dispatching.h b/numpy/_core/src/umath/dispatching.h index 9bb5fbd9b013..95bcb32bf0ce 100644 --- a/numpy/_core/src/umath/dispatching.h +++ b/numpy/_core/src/umath/dispatching.h @@ -43,6 +43,10 @@ object_only_ufunc_promoter(PyObject *ufunc, NPY_NO_EXPORT int install_logical_ufunc_promoter(PyObject *ufunc); +NPY_NO_EXPORT PyObject * +get_info_no_cast(PyUFuncObject *ufunc, PyArray_DTypeMeta *op_dtype, + int ndtypes); + #ifdef __cplusplus } #endif diff --git 
a/numpy/_core/src/umath/ufunc_object.h b/numpy/_core/src/umath/ufunc_object.h index f8e522374394..dc55a561fba5 100644 --- a/numpy/_core/src/umath/ufunc_object.h +++ b/numpy/_core/src/umath/ufunc_object.h @@ -3,6 +3,9 @@ #include +#ifdef __cplusplus +extern "C" { +#endif NPY_NO_EXPORT const char* ufunc_get_name_cstr(PyUFuncObject *ufunc); @@ -10,4 +13,8 @@ ufunc_get_name_cstr(PyUFuncObject *ufunc); NPY_NO_EXPORT PyObject * PyUFunc_GetDefaultIdentity(PyUFuncObject *ufunc, npy_bool *reorderable); +#ifdef __cplusplus +} +#endif + #endif diff --git a/numpy/_core/src/umath/ufunc_type_resolution.h b/numpy/_core/src/umath/ufunc_type_resolution.h index 3f8e7505ea39..9e812e97d6fe 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.h +++ b/numpy/_core/src/umath/ufunc_type_resolution.h @@ -1,6 +1,10 @@ #ifndef _NPY_PRIVATE__UFUNC_TYPE_RESOLUTION_H_ #define _NPY_PRIVATE__UFUNC_TYPE_RESOLUTION_H_ +#ifdef __cplusplus +extern "C" { +#endif + NPY_NO_EXPORT int PyUFunc_SimpleBinaryComparisonTypeResolver(PyUFuncObject *ufunc, NPY_CASTING casting, @@ -142,4 +146,8 @@ PyUFunc_DefaultLegacyInnerLoopSelector(PyUFuncObject *ufunc, NPY_NO_EXPORT int raise_no_loop_found_error(PyUFuncObject *ufunc, PyObject **dtypes); +#ifdef __cplusplus +} +#endif + #endif From 2e0941fc20fbdc5289946c1b4606c5a66e73a624 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Dec 2024 17:41:01 +0000 Subject: [PATCH 021/187] MAINT: Bump actions/cache from 4.1.2 to 4.2.0 Bumps [actions/cache](https://github.com/actions/cache) from 4.1.2 to 4.2.0. - [Release notes](https://github.com/actions/cache/releases) - [Commits](https://github.com/actions/cache/compare/v4.1.2...v4.2.0) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/linux_qemu.yml | 2 +- .github/workflows/macos.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 4ef74bcfa7f8..d773152bb1bb 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -115,7 +115,7 @@ jobs: sudo apt install -y ninja-build gcc-${TOOLCHAIN_NAME} g++-${TOOLCHAIN_NAME} gfortran-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@v4.1.2 + uses: actions/cache@v4.2.0 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 62fd24a4e337..d40ef9f60f20 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -44,7 +44,7 @@ jobs: echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT - name: Setup compiler cache - uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 id: cache-ccache with: path: ${{ steps.prep-ccache.outputs.dir }} @@ -68,7 +68,7 @@ jobs: # ensure we re-solve once a day (since we don't lock versions). Could be # replaced by a conda-lock based approach in the future. 
- name: Cache conda environment - uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 env: # Increase this value to reset cache if environment.yml has not changed CACHE_NUMBER: 1 From 1ed7b6712f94082ffbc15b4009fce94143624afc Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 7 Dec 2024 13:14:37 -0700 Subject: [PATCH 022/187] REL: Prepare for the NumPy 2.2.0 release [wheel build] - Update .mailmap - Update pyproject.toml - Update 2.2.0-changelog.rst - Update 2.2.0-notes.rst - Remove fragment --- .mailmap | 6 +++++ doc/changelog/2.2.0-changelog.rst | 27 ++++++++++++------- .../upcoming_changes/27896.performance.rst | 2 -- doc/source/release/2.2.0-notes.rst | 5 ++++ pyproject.toml | 2 +- 5 files changed, 30 insertions(+), 12 deletions(-) delete mode 100644 doc/release/upcoming_changes/27896.performance.rst diff --git a/.mailmap b/.mailmap index ee897a292229..9a7b3aa5ef72 100644 --- a/.mailmap +++ b/.mailmap @@ -10,6 +10,7 @@ !8bitmp3 <19637339+8bitmp3@users.noreply.github.com> !Algorithmist-Girl <36552319+Algorithmist-Girl@users.noreply.github.com> !DWesl <22566757+DWesl@users.noreply.github.com> +!Dreamge !Endolith !GalaxySnail !Illviljan <14371165+Illviljan@users.noreply.github.com> @@ -20,6 +21,7 @@ !Scian <65375075+hoony6134@users.noreply.github.com> !Searchingdays !amagicmuffin <2014wcheng@gmail.com> +!bersbersbers <12128514+bersbersbers@users.noreply.github.com> !code-review-doctor !cook-1229 <70235336+cook-1229@users.noreply.github.com> !dg3192 <113710955+dg3192@users.noreply.github.com> @@ -29,6 +31,7 @@ !fengluoqiuwu <163119756+fengluoqiuwu@users.noreply.github.com> !h-vetinari !h6197627 <44726212+h6197627@users.noreply.github.com> +!hutauf !jbCodeHub !juztamau5 !legoffant <58195095+legoffant@users.noreply.github.com> @@ -39,7 +42,9 @@ !mcp292 !mgunyho <20118130+mgunyho@users.noreply.github.com> !msavinash <73682349+msavinash@users.noreply.github.com> +!musvaage 
!mykykh <49101849+mykykh@users.noreply.github.com> +!nullSoup <34267803+nullSoup@users.noreply.github.com> !ogidig5 <82846833+ogidig5@users.noreply.github.com> !partev !pkubaj @@ -642,6 +647,7 @@ Slava Gorloff <31761951+gorloffslava@users.noreply.github.com> Søren Rasmussen <47032123+sorenrasmussenai@users.noreply.github.com> Spencer Hill Srimukh Sripada +Stan Ulbrych <89152624+StanFromIreland@users.noreply.github.com> Stefan Behnel Stefan van der Walt Stefan van der Walt diff --git a/doc/changelog/2.2.0-changelog.rst b/doc/changelog/2.2.0-changelog.rst index 2a00d67b5736..b82a3d03b4fc 100644 --- a/doc/changelog/2.2.0-changelog.rst +++ b/doc/changelog/2.2.0-changelog.rst @@ -1,4 +1,3 @@ -Generating change log for range v2.2.0.dev0^..HEAD Contributors ============ @@ -6,8 +5,14 @@ Contributors A total of 106 people contributed to this release. People with a "+" by their names contributed a patch for the first time. +* !Dreamge + +* !bersbersbers + * !fengluoqiuwu + * !h-vetinari +* !hutauf + +* !musvaage + +* !nullSoup + +* Aarni Koskela + * Abhishek Kumar + * Abraham Medina + * Aditi Juneja + @@ -18,7 +23,6 @@ names contributed a patch for the first time. * Amit Subhash Chejara + * Andrew Nelson * Anne Gunn -* Aarni Koskela + * Austin Ran + * Ben Walsh * Benjamin A. Beasley + @@ -31,7 +35,6 @@ names contributed a patch for the first time. * Clément Robert * Dane Reimers + * Dimitri Papadopoulos Orfanos -* Dreamge + * Evgeni Burovski * GUAN MING * Habiba Hye + @@ -95,7 +98,7 @@ names contributed a patch for the first time. * Slava Gorloff + * Slobodan Miletic + * Soutrik Bandyopadhyay + -* Stan U. + +* Stan Ulbrych + * Stefan van der Walt * Tim Hoffmann * Timo Röhling @@ -106,17 +109,13 @@ names contributed a patch for the first time. 
* Warren Weckesser * Xiao Yuan + * Yashasvi Misra -* bersbersbers + * bilderbuchi + * dependabot[bot] -* hutauf + -* musvaage + -* nullSoup + Pull requests merged ==================== -A total of 307 pull requests were merged for this release. +A total of 317 pull requests were merged for this release. * `#14622 `__: BUG: fix datetime64/timedelta64 hash and match Python * `#15181 `__: ENH: Add nd-support to trim_zeros @@ -425,4 +424,14 @@ A total of 307 pull requests were merged for this release. * `#27845 `__: BUG: Never negate strides in reductions (for now) * `#27846 `__: ENH: add matvec and vecmat gufuncs * `#27852 `__: DOC: Correct versionadded for vecmat and matvec. +* `#27853 `__: REL: Prepare for the NumPy 2.2.0rc1 release [wheel build] +* `#27874 `__: BUG: fix importing numpy in Python's optimized mode (#27868) +* `#27895 `__: DOC: Fix double import in docs (#27878) +* `#27904 `__: MAINT: Ensure correct handling for very large unicode strings +* `#27906 `__: MAINT: Use mask_store instead of store for compiler workaround +* `#27908 `__: MAINT: Update highway from main. +* `#27911 `__: ENH: update __module__ in numpy.random module +* `#27912 `__: ENH: Refactor ``__qualname__`` across API +* `#27913 `__: PERF: improve multithreaded ufunc scaling +* `#27916 `__: MAINT: Bump actions/cache from 4.1.2 to 4.2.0 diff --git a/doc/release/upcoming_changes/27896.performance.rst b/doc/release/upcoming_changes/27896.performance.rst deleted file mode 100644 index cf79dd19f558..000000000000 --- a/doc/release/upcoming_changes/27896.performance.rst +++ /dev/null @@ -1,2 +0,0 @@ -* Improved multithreaded scaling on the free-threaded build when many threads - simultaneously call the same ufunc operations. 
diff --git a/doc/source/release/2.2.0-notes.rst b/doc/source/release/2.2.0-notes.rst index 01dc98078487..41b3d2b58004 100644 --- a/doc/source/release/2.2.0-notes.rst +++ b/doc/source/release/2.2.0-notes.rst @@ -125,6 +125,11 @@ multiple modules are present within a single source file. Performance improvements and changes ==================================== +* Improved multithreaded scaling on the free-threaded build when many threads + simultaneously call the same ufunc operations. + + (`gh-27896 `__) + * NumPy now uses fast-on-failure attribute lookups for protocols. This can greatly reduce overheads of function calls or array creation especially with custom Python objects. The largest improvements will be seen on Python 3.12 diff --git a/pyproject.toml b/pyproject.toml index 6aece2d9798d..ede5b3ebe299 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.2.0rc1" +version = "2.2.0" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"} From d97d0714325cecbbbd806054d5dd260b825c7752 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 7 Dec 2024 17:17:29 -0700 Subject: [PATCH 023/187] MAINT: Try fixing MacOS cirrus build failures. 
[wheel build] --- tools/ci/cirrus_wheels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/ci/cirrus_wheels.yml b/tools/ci/cirrus_wheels.yml index 4b06e5776612..aa1063d9f81d 100644 --- a/tools/ci/cirrus_wheels.yml +++ b/tools/ci/cirrus_wheels.yml @@ -78,7 +78,7 @@ macosx_arm64_task: build_script: | brew install micromamba gfortran - micromamba shell init -s bash -p ~/micromamba + micromamba shell init -s bash --root-prefix ~/micromamba source ~/.bash_profile micromamba create -n numpydev From 5a91e1405874e5787d4384a67ccd01b1f3be9446 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 8 Dec 2024 09:09:06 -0700 Subject: [PATCH 024/187] MAINT: Prepare 2.2.x for further development --- doc/source/release.rst | 1 + doc/source/release/2.2.1-notes.rst | 19 +++++++++++++++++++ pavement.py | 2 +- pyproject.toml | 2 +- 4 files changed, 22 insertions(+), 2 deletions(-) create mode 100644 doc/source/release/2.2.1-notes.rst diff --git a/doc/source/release.rst b/doc/source/release.rst index fd0702f4ae17..6e9ab8a22cc0 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,7 @@ Release notes .. toctree:: :maxdepth: 2 + 2.2.1 2.2.0 2.1.3 2.1.2 diff --git a/doc/source/release/2.2.1-notes.rst b/doc/source/release/2.2.1-notes.rst new file mode 100644 index 000000000000..b8f8d48078cb --- /dev/null +++ b/doc/source/release/2.2.1-notes.rst @@ -0,0 +1,19 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.2.1 Release Notes +========================== + + +Highlights +========== + +*We'll choose highlights for this release near the end of the release cycle.* + + +.. if release snippets have been incorporated already, uncomment the follow + line (leave the `.. include:: directive) + +.. **Content from release note snippets in doc/release/upcoming_changes:** + +.. 
include:: notes-towncrier.rst diff --git a/pavement.py b/pavement.py index e8e63ee89f97..2d300fe35a5d 100644 --- a/pavement.py +++ b/pavement.py @@ -36,7 +36,7 @@ #----------------------------------- # Path to the release notes -RELEASE_NOTES = 'doc/source/release/2.2.0-notes.rst' +RELEASE_NOTES = 'doc/source/release/2.2.1-notes.rst' #------------------------------------------------------- diff --git a/pyproject.toml b/pyproject.toml index ede5b3ebe299..85dd7c088c53 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.2.0" +version = "2.2.1" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"} From e0b53c0133cffcbb9aacb364a3a721c0ef4612f5 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Mon, 9 Dec 2024 11:46:13 +0200 Subject: [PATCH 025/187] TEST: cleanups [skip cirrus][skip azp] (#27943) --- numpy/f2py/tests/util.py | 1 - numpy/ma/tests/test_core.py | 7 ++++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index 9964c285e2bc..e2fcc1ba39d4 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -57,7 +57,6 @@ def check_language(lang, code_snippet=None): return runmeson.returncode == 0 finally: shutil.rmtree(tmpdir) - return False fortran77_code = ''' diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 17fa26c351d3..53651004db9a 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -23,7 +23,7 @@ import numpy._core.umath as umath from numpy.exceptions import AxisError from numpy.testing import ( - assert_raises, assert_warns, suppress_warnings, IS_WASM + assert_raises, assert_warns, suppress_warnings, IS_WASM, temppath ) from numpy.testing._private.utils import requires_memory from numpy import ndarray @@ -1019,8 +1019,9 @@ def test_maskedarray_tofile_raises_notimplementederror(self): xm = masked_array([1, 2, 3], 
mask=[False, True, False]) # Test case to check the NotImplementedError. # It is not implemented at this point of time. We can change this in future - with pytest.raises(NotImplementedError): - np.save('xm.np', xm) + with temppath(suffix='.npy') as path: + with pytest.raises(NotImplementedError): + np.save(path, xm) class TestMaskedArrayArithmetic: From c3faf141ef02ee84e1fe69e128bb314c730efa17 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 9 Dec 2024 12:26:19 -0700 Subject: [PATCH 026/187] BUG: fix use-after-free error in npy_hashtable.cpp (#27955) --- numpy/_core/src/common/npy_hashtable.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/common/npy_hashtable.cpp b/numpy/_core/src/common/npy_hashtable.cpp index de3194ac05d2..2a0f76dcf73b 100644 --- a/numpy/_core/src/common/npy_hashtable.cpp +++ b/numpy/_core/src/common/npy_hashtable.cpp @@ -126,10 +126,10 @@ NPY_NO_EXPORT void PyArrayIdentityHash_Dealloc(PyArrayIdentityHash *tb) { PyMem_Free(tb->buckets); - PyMem_Free(tb); #ifdef Py_GIL_DISABLED delete (std::shared_mutex *)tb->mutex; #endif + PyMem_Free(tb); } From 8e98cbe083b9648a3ca2a69671a587667196a4c6 Mon Sep 17 00:00:00 2001 From: Thomas A Caswell Date: Mon, 9 Dec 2024 15:49:11 -0500 Subject: [PATCH 027/187] BLD: add missing include (#27956) This is required to build with the main and 3.13 branches of CPython when built with freethreading enabled. 
--- numpy/_core/src/common/npy_hashtable.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/_core/src/common/npy_hashtable.cpp b/numpy/_core/src/common/npy_hashtable.cpp index de3194ac05d2..113e4f387241 100644 --- a/numpy/_core/src/common/npy_hashtable.cpp +++ b/numpy/_core/src/common/npy_hashtable.cpp @@ -17,6 +17,7 @@ #include "templ_common.h" #include "npy_hashtable.h" +#include From fb5acadb648d0fcd680227d6ada93d2c3162d6db Mon Sep 17 00:00:00 2001 From: ixgbe00 Date: Mon, 9 Dec 2024 19:17:31 +0800 Subject: [PATCH 028/187] BUG:fix compile error libatomic link test to meson.build --- numpy/meson.build | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/numpy/meson.build b/numpy/meson.build index 88c4029adae9..f914ef695f52 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -223,6 +223,8 @@ null_dep = dependency('', required : false) atomic_dep = null_dep code_non_lockfree = ''' #include + #include + #include int main() { struct { void *p; @@ -230,10 +232,10 @@ code_non_lockfree = ''' } x; x.p = NULL; x.u8v = 0; - uint8_t res = __atomic_load_n(x.u8v, __ATOMIC_SEQ_CST); - __atomic_store_n(x.u8v, 1, ATOMIC_SEQ_CST); - void *p = __atomic_load_n(x.p, __ATOMIC_SEQ_CST); - __atomic_store_n((void **)x.p, NULL, __ATOMIC_SEQ_CST) + uint8_t res = __atomic_load_n(&x.u8v, __ATOMIC_SEQ_CST); + __atomic_store_n(&x.u8v, 1, __ATOMIC_SEQ_CST); + void *p = __atomic_load_n((void **)x.p, __ATOMIC_SEQ_CST); + __atomic_store_n((void **)x.p, NULL, __ATOMIC_SEQ_CST); return 0; } ''' From e064b9a61aa433910eb785a8ac06b4001a8fac13 Mon Sep 17 00:00:00 2001 From: ixgbe00 Date: Wed, 11 Dec 2024 08:57:06 +0800 Subject: [PATCH 029/187] Update numpy/meson.build Co-authored-by: Nathan Goldbaum --- numpy/meson.build | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/meson.build b/numpy/meson.build index f914ef695f52..353f89398ca2 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -224,7 +224,6 @@ atomic_dep = null_dep code_non_lockfree = ''' 
#include #include - #include int main() { struct { void *p; From badf501788600cba3b2bb79ed63d4c1bfd9784f8 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 10 Dec 2024 21:49:25 +0100 Subject: [PATCH 030/187] TYP: Fix falsely rejected value types in ``ndarray.__setitem__`` --- numpy/__init__.pyi | 67 +++++++++++++++++++++----- numpy/typing/tests/data/pass/simple.py | 10 ++-- 2 files changed, 62 insertions(+), 15 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index a0287a3f6e96..a81e060f6233 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -844,7 +844,8 @@ _Float64_co: TypeAlias = float | floating[_64Bit] | float32 | float16 | integer[ _Complex64_co: TypeAlias = number[_32Bit] | number[_16Bit] | number[_8Bit] | builtins.bool | np.bool _Complex128_co: TypeAlias = complex | number[_64Bit] | _Complex64_co -_ArrayIndexLike: TypeAlias = SupportsIndex | slice | EllipsisType | _ArrayLikeInt_co | None +_ToIndex: TypeAlias = SupportsIndex | slice | EllipsisType | _ArrayLikeInt_co | None +_ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...] _UnsignedIntegerCType: TypeAlias = type[ ct.c_uint8 | ct.c_uint16 | ct.c_uint32 | ct.c_uint64 @@ -982,6 +983,8 @@ if sys.version_info >= (3, 11): _ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co else: _ConvertibleToComplex: TypeAlias = complex | SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co +_ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None +_ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None _NDIterFlagsKind: TypeAlias = L[ "buffered", @@ -1070,7 +1073,7 @@ class _HasShapeAndSupportsItem(_HasShape[_ShapeT_co], _SupportsItem[_T_co], Prot # matches any `x` on `x.type.item() -> _T_co`, e.g. 
`dtype[np.int8]` gives `_T_co: int` @type_check_only -class _HashTypeWithItem(Protocol[_T_co]): +class _HasTypeWithItem(Protocol[_T_co]): @property def type(self, /) -> type[_SupportsItem[_T_co]]: ... @@ -1082,7 +1085,7 @@ class _HasShapeAndDTypeWithItem(Protocol[_ShapeT_co, _T_co]): @property def shape(self, /) -> _ShapeT_co: ... @property - def dtype(self, /) -> _HashTypeWithItem[_T_co]: ... + def dtype(self, /) -> _HasTypeWithItem[_T_co]: ... @type_check_only class _HasRealAndImag(Protocol[_RealT_co, _ImagT_co]): @@ -1112,6 +1115,7 @@ class _HasDateAttributes(Protocol): @property def year(self) -> int: ... + ### Mixins (for internal use only) @type_check_only @@ -2006,7 +2010,6 @@ class _ArrayOrScalarCommon: correction: float = ..., ) -> _ArrayT: ... - class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] @property @@ -2082,16 +2085,56 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ... @overload - def __getitem__(self, key: _ArrayIndexLike | tuple[_ArrayIndexLike, ...], /) -> ndarray[_Shape, _DType_co]: ... + def __getitem__(self, key: _ToIndices, /) -> ndarray[_Shape, _DType_co]: ... @overload def __getitem__(self: NDArray[void], key: str, /) -> ndarray[_ShapeT_co, np.dtype[Any]]: ... @overload def __getitem__(self: NDArray[void], key: list[str], /) -> ndarray[_ShapeT_co, _dtype[void]]: ... - @overload - def __setitem__(self: NDArray[void], key: str | list[str], value: ArrayLike, /) -> None: ... - @overload - def __setitem__(self, key: _ArrayIndexLike | tuple[_ArrayIndexLike, ...], value: ArrayLike, /) -> None: ... + @overload # flexible | object_ | bool + def __setitem__( + self: ndarray[Any, dtype[flexible | object_ | np.bool] | dtypes.StringDType], + key: _ToIndices, + value: object, + /, + ) -> None: ... 
+ @overload # integer + def __setitem__( + self: NDArray[integer], + key: _ToIndices, + value: _ConvertibleToInt | _NestedSequence[_ConvertibleToInt] | _ArrayLikeInt_co, + /, + ) -> None: ... + @overload # floating + def __setitem__( + self: NDArray[floating], + key: _ToIndices, + value: _ConvertibleToFloat | _NestedSequence[_ConvertibleToFloat | None] | _ArrayLikeFloat_co | None, + /, + ) -> None: ... + @overload # complexfloating + def __setitem__( + self: NDArray[complexfloating], + key: _ToIndices, + value: _ConvertibleToComplex | _NestedSequence[_ConvertibleToComplex | None] | _ArrayLikeNumber_co | None, + /, + ) -> None: ... + @overload # timedelta64 + def __setitem__( + self: NDArray[timedelta64], + key: _ToIndices, + value: _ConvertibleToTD64 | _NestedSequence[_ConvertibleToTD64], + /, + ) -> None: ... + @overload # datetime64 + def __setitem__( + self: NDArray[datetime64], + key: _ToIndices, + value: _ConvertibleToDT64 | _NestedSequence[_ConvertibleToDT64], + /, + ) -> None: ... + @overload # catch-all + def __setitem__(self, key: _ToIndices, value: ArrayLike, /) -> None: ... @property def ctypes(self) -> _ctypes[int]: ... @@ -4122,16 +4165,16 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __init__(self: timedelta64[int], value: dt.timedelta, format: _TimeUnitSpec[_IntTimeUnit], /) -> None: ... @overload - def __init__(self: timedelta64[int], value: int, format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> None: ... + def __init__(self: timedelta64[int], value: _IntLike_co, format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> None: ... @overload def __init__( self: timedelta64[dt.timedelta], - value: dt.timedelta | int, + value: dt.timedelta | _IntLike_co, format: _TimeUnitSpec[_NativeTD64Unit] = ..., /, ) -> None: ... @overload - def __init__(self, value: int | bytes | str | dt.timedelta | None, format: _TimeUnitSpec = ..., /) -> None: ... 
+ def __init__(self, value: _ConvertibleToTD64, format: _TimeUnitSpec = ..., /) -> None: ... # NOTE: Only a limited number of units support conversion # to builtin scalar types: `Y`, `M`, `ns`, `ps`, `fs`, `as` diff --git a/numpy/typing/tests/data/pass/simple.py b/numpy/typing/tests/data/pass/simple.py index 16c6e8eb5de5..8f44e6e76f83 100644 --- a/numpy/typing/tests/data/pass/simple.py +++ b/numpy/typing/tests/data/pass/simple.py @@ -71,8 +71,13 @@ def iterable_func(x: Iterable[object]) -> Iterable[object]: array_2d = np.ones((3, 3)) array_2d[:2, :2] -array_2d[..., 0] array_2d[:2, :2] = 0 +array_2d[..., 0] +array_2d[..., 0] = 2 +array_2d[-1, -1] = None + +array_obj = np.zeros(1, dtype=np.object_) +array_obj[0] = slice(None) # Other special methods len(array) @@ -80,8 +85,7 @@ def iterable_func(x: Iterable[object]) -> Iterable[object]: array_scalar = np.array(1) int(array_scalar) float(array_scalar) -# currently does not work due to https://github.com/python/typeshed/issues/1904 -# complex(array_scalar) +complex(array_scalar) bytes(array_scalar) operator.index(array_scalar) bool(array_scalar) From 531565c47b21546547097526754318c32ddfc9b1 Mon Sep 17 00:00:00 2001 From: Peter Hawkins Date: Thu, 12 Dec 2024 11:48:35 -0500 Subject: [PATCH 031/187] MAINT: Don't wrap ``#include `` with ``extern "C"`` This extern block was recently moved, which exposed a latent bug in CPython (https://github.com/python/cpython/pull/127772), but it's probably not a good practice in general to wrap other code's headers with extern guards. 
--- numpy/_core/include/numpy/ndarraytypes.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h index 7d1fa2f0e000..37788a74557f 100644 --- a/numpy/_core/include/numpy/ndarraytypes.h +++ b/numpy/_core/include/numpy/ndarraytypes.h @@ -1,15 +1,15 @@ #ifndef NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ #define NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ -#ifdef __cplusplus -extern "C" { -#endif - #include "npy_common.h" #include "npy_endian.h" #include "npy_cpu.h" #include "utils.h" +#ifdef __cplusplus +extern "C" { +#endif + #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN /* Always allow threading unless it was explicitly disabled at build time */ From 0282fc81cf6e279c6a9815bb5dfb1f5c4fc5eec4 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 12 Dec 2024 17:22:06 -0700 Subject: [PATCH 032/187] BUG: Fix segfault in stringdtype lexsort (#27992) * BUG: do not assume REFCHK is the same as object in lexsort * TST: add tests for stringdtype argsort * TST: add tests for string lexsort --- numpy/_core/src/multiarray/item_selection.c | 3 +-- numpy/_core/tests/test_multiarray.py | 7 +++++++ numpy/_core/tests/test_stringdtype.py | 16 +++++++++++++++- 3 files changed, 23 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index f3ce35f3092f..eadb7cc099d3 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -2014,8 +2014,7 @@ PyArray_LexSort(PyObject *sort_keys, int axis) } rcode = argsort(its[j]->dataptr, (npy_intp *)rit->dataptr, N, mps[j]); - if (rcode < 0 || (PyDataType_REFCHK(PyArray_DESCR(mps[j])) - && PyErr_Occurred())) { + if (rcode < 0 || (object && PyErr_Occurred())) { goto fail; } PyArray_ITER_NEXT(its[j]); diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 02ed3ece94b5..7ac22869495f 
100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -5374,6 +5374,13 @@ def test_object(self): # gh-6312 u, v = np.array(u, dtype='object'), np.array(v, dtype='object') assert_array_equal(idx, np.lexsort((u, v))) + def test_strings(self): # gh-27984 + for dtype in "TU": + surnames = np.array(['Hertz', 'Galilei', 'Hertz'], dtype=dtype) + first_names = np.array(['Heinrich', 'Galileo', 'Gustav'], dtype=dtype) + assert_array_equal(np.lexsort((first_names, surnames)), [1, 2, 0]) + + def test_invalid_axis(self): # gh-7528 x = np.linspace(0., 1., 42*3).reshape(42, 3) assert_raises(AxisError, np.lexsort, x, axis=2) diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 11e51d4957fc..ad4276f40a3e 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -415,8 +415,19 @@ def test_sort(dtype, strings): def test_sort(strings, arr_sorted): arr = np.array(strings, dtype=dtype) - np.random.default_rng().shuffle(arr) na_object = getattr(arr.dtype, "na_object", "") + if na_object is None and None in strings: + with pytest.raises( + ValueError, + match="Cannot compare null that is not a nan-like value", + ): + np.argsort(arr) + argsorted = None + elif na_object is pd_NA or na_object != '': + argsorted = None + else: + argsorted = np.argsort(arr) + np.random.default_rng().shuffle(arr) if na_object is None and None in strings: with pytest.raises( ValueError, @@ -426,6 +437,9 @@ def test_sort(strings, arr_sorted): else: arr.sort() assert np.array_equal(arr, arr_sorted, equal_nan=True) + if argsorted is not None: + assert np.array_equal(argsorted, np.argsort(strings)) + # make a copy so we don't mutate the lists in the fixture strings = strings.copy() From 0d8444d4aabb4859ba42b13ef61af550ccd9a1a2 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Sun, 15 Dec 2024 11:14:31 -0500 Subject: [PATCH 033/187] MAINT: random: Tweak module code in mtrand.pyx to fix a 
Cython warning. Closes gh-27954. --- numpy/random/mtrand.pyx | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index 49c0257167ad..26d0f5f4d1a4 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -4902,6 +4902,7 @@ def ranf(*args, **kwargs): return _rand.random_sample(*args, **kwargs) __all__ = [ + 'RandomState', 'beta', 'binomial', 'bytes', @@ -4954,7 +4955,6 @@ __all__ = [ 'wald', 'weibull', 'zipf', - 'RandomState', ] seed.__module__ = "numpy.random" @@ -4963,7 +4963,8 @@ sample.__module__ = "numpy.random" get_bit_generator.__module__ = "numpy.random" set_bit_generator.__module__ = "numpy.random" -for method_name in __all__[:-1]: +# The first item in __all__ is 'RandomState', so it can be skipped here. +for method_name in __all__[1:]: method = getattr(RandomState, method_name, None) if method is not None: method.__module__ = "numpy.random" From 4f82c3262f8a0a0229d0483b53e47694118b32a5 Mon Sep 17 00:00:00 2001 From: Warren Weckesser Date: Sun, 15 Dec 2024 14:02:32 -0500 Subject: [PATCH 034/187] BUG: Cython API was missing NPY_UINTP. Closes gh-27890. --- numpy/__init__.cython-30.pxd | 1 + numpy/__init__.pxd | 1 + numpy/_core/tests/examples/cython/checks.pyx | 6 ++++++ numpy/_core/tests/test_cython.py | 6 ++++++ 4 files changed, 14 insertions(+) diff --git a/numpy/__init__.cython-30.pxd b/numpy/__init__.cython-30.pxd index 9fbdbc59d782..e35cef5fa1a8 100644 --- a/numpy/__init__.cython-30.pxd +++ b/numpy/__init__.cython-30.pxd @@ -151,6 +151,7 @@ cdef extern from "numpy/arrayobject.h": NPY_COMPLEX512 NPY_INTP + NPY_UINTP NPY_DEFAULT_INT # Not a compile time constant (normally)! 
ctypedef enum NPY_ORDER: diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd index 4aa14530ab4f..89fe913b9cd3 100644 --- a/numpy/__init__.pxd +++ b/numpy/__init__.pxd @@ -160,6 +160,7 @@ cdef extern from "numpy/arrayobject.h": NPY_COMPLEX512 NPY_INTP + NPY_UINTP NPY_DEFAULT_INT # Not a compile time constant (normally)! ctypedef enum NPY_ORDER: diff --git a/numpy/_core/tests/examples/cython/checks.pyx b/numpy/_core/tests/examples/cython/checks.pyx index c0bb1f3f5370..34359fb42fcb 100644 --- a/numpy/_core/tests/examples/cython/checks.pyx +++ b/numpy/_core/tests/examples/cython/checks.pyx @@ -266,3 +266,9 @@ def inc2_cfloat_struct(cnp.ndarray[cnp.cfloat_t] arr): # This works in both modes arr[1].real = arr[1].real + 1 arr[1].imag = arr[1].imag + 1 + + +def check_npy_uintp_type_enum(): + # Regression test for gh-27890: cnp.NPY_UINTP was not defined. + # Cython would fail to compile this before gh-27890 was fixed. + return cnp.NPY_UINTP > 0 diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index fce00a4927fc..d7fe28a8f053 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -295,3 +295,9 @@ def test_complex(install_temp): arr = np.array([0, 10+10j], dtype="F") inc2_cfloat_struct(arr) assert arr[1] == (12 + 12j) + + +def test_npy_uintp_type_enum(): + import checks + assert checks.check_npy_uintp_type_enum() + From 32f52a37bc0f7ba3ab0321e57ba8719c15848772 Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Tue, 17 Dec 2024 15:47:35 +0200 Subject: [PATCH 035/187] CI: pin scipy-doctest to 1.5.1 (#28020) Pin scipy-doctest to 1.5.1 until new errors in previously uncovered tests are fixed. 
--- .github/workflows/linux.yml | 2 +- requirements/doc_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 8b5461d945e9..b4826f2e1642 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -188,7 +188,7 @@ jobs: - name: Check docstests shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' run: | - pip install scipy-doctest hypothesis matplotlib scipy pytz pandas + pip install scipy-doctest==1.5.1 hypothesis==6.104.1 matplotlib scipy pytz pandas spin check-docs -v spin check-tutorials -v diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt index 74ef448182af..4dcf2a788df0 100644 --- a/requirements/doc_requirements.txt +++ b/requirements/doc_requirements.txt @@ -18,4 +18,4 @@ towncrier toml # for doctests, also needs pytz which is in test_requirements -scipy-doctest +scipy-doctest==1.5.1 From 554739ee490013cf2757c1ef1420d120d607836b Mon Sep 17 00:00:00 2001 From: Simon Altrogge <8720147+simonaltrogge@users.noreply.github.com> Date: Thu, 19 Dec 2024 16:12:41 +0100 Subject: [PATCH 036/187] TYP: allow `None` in operand sequence of nditer Prevent type-hint errors when using `nditer` in an intended way (see https://numpy.org/doc/stable/reference/arrays.nditer.html#iterator-allocated-output-arrays). 
Fix #28038 --- numpy/__init__.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index a81e060f6233..4972f3bf209c 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4746,7 +4746,7 @@ class iinfo(Generic[_IntegerT_co]): class nditer: def __new__( cls, - op: ArrayLike | Sequence[ArrayLike], + op: ArrayLike | Sequence[ArrayLike | None], flags: None | Sequence[_NDIterFlagsKind] = ..., op_flags: None | Sequence[Sequence[_NDIterFlagsOp]] = ..., op_dtypes: DTypeLike | Sequence[DTypeLike] = ..., From 723605bcaf472514fbb34947e5b61daa135c0769 Mon Sep 17 00:00:00 2001 From: Simon Altrogge <8720147+simonaltrogge@users.noreply.github.com> Date: Fri, 20 Dec 2024 11:46:40 +0100 Subject: [PATCH 037/187] TST: Add test for allowing `None` in operand sequence passed to `nditer` --- numpy/typing/tests/data/pass/nditer.py | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 numpy/typing/tests/data/pass/nditer.py diff --git a/numpy/typing/tests/data/pass/nditer.py b/numpy/typing/tests/data/pass/nditer.py new file mode 100644 index 000000000000..25a5b44d7aec --- /dev/null +++ b/numpy/typing/tests/data/pass/nditer.py @@ -0,0 +1,4 @@ +import numpy as np + +arr = np.array([1]) +np.nditer([arr, None]) From acb051e2a3fc90c687079dbe8a8b56c2af507eb3 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 20 Dec 2024 11:55:48 -0700 Subject: [PATCH 038/187] REL: Prepare for the NumPy 2.2.1 release [wheel build] - Create 2.2.1-changelog.rst. - Update 2.2.1-notes.rst. - Update .mailmap. 
--- .mailmap | 1 + doc/changelog/2.2.1-changelog.rst | 34 +++++++++++++++++ doc/source/release/2.2.1-notes.rst | 59 ++++++++++++++++++++++++------ 3 files changed, 82 insertions(+), 12 deletions(-) create mode 100644 doc/changelog/2.2.1-changelog.rst diff --git a/.mailmap b/.mailmap index 9a7b3aa5ef72..4853320b7835 100644 --- a/.mailmap +++ b/.mailmap @@ -709,6 +709,7 @@ Xiangyi Wang Yamada Fuyuka Yang Hau Yang Hau +Yang Wang Yash Pethe Yash Pethe <83630710+patient74@users.noreply.github.com> Yashasvi Misra diff --git a/doc/changelog/2.2.1-changelog.rst b/doc/changelog/2.2.1-changelog.rst new file mode 100644 index 000000000000..ba3c4f19eb3f --- /dev/null +++ b/doc/changelog/2.2.1-changelog.rst @@ -0,0 +1,34 @@ + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Joren Hammudoglu +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Simon Altrogge +* Thomas A Caswell +* Warren Weckesser +* Yang Wang + + +Pull requests merged +==================== + +A total of 12 pull requests were merged for this release. + +* `#27935 `__: MAINT: Prepare 2.2.x for further development +* `#27950 `__: TEST: cleanups [skip cirrus][skip azp] +* `#27958 `__: BUG: fix use-after-free error in npy_hashtable.cpp (#27955) +* `#27959 `__: BLD: add missing include +* `#27982 `__: BUG:fix compile error libatomic link test to meson.build +* `#27990 `__: TYP: Fix falsely rejected value types in ``ndarray.__setitem__`` +* `#27991 `__: MAINT: Don't wrap ``#include `` with ``extern "C"`` +* `#27993 `__: BUG: Fix segfault in stringdtype lexsort +* `#28006 `__: MAINT: random: Tweak module code in mtrand.pyx to fix a Cython... +* `#28007 `__: BUG: Cython API was missing NPY_UINTP. 
+* `#28021 `__: CI: pin scipy-doctest to 1.5.1 +* `#28044 `__: TYP: allow ``None`` in operand sequence of nditer diff --git a/doc/source/release/2.2.1-notes.rst b/doc/source/release/2.2.1-notes.rst index b8f8d48078cb..fe60fa0268f3 100644 --- a/doc/source/release/2.2.1-notes.rst +++ b/doc/source/release/2.2.1-notes.rst @@ -4,16 +4,51 @@ NumPy 2.2.1 Release Notes ========================== +NumPy 2.2.1 is a patch release following 2.2.0. It fixes bugs found after the +2.2.0 release and has several maintenance pins to work around upstream changes. + +There was some breakage in downstream projects following the 2.2.0 release due +to updates to NumPy typing. Because of problems due to MyPy defects, we +recommend using basedpyright for type checking, it can be installed from +PyPI. The Pylance extension for Visual Studio Code is also based on Pyright. +Problems that persist when using basedpyright should be reported as issues +on the NumPy github site. + +This release supports Python 3.10-3.13. + + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Joren Hammudoglu +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Simon Altrogge +* Thomas A Caswell +* Warren Weckesser +* Yang Wang + + + +Pull requests merged +==================== + +A total of 12 pull requests were merged for this release. 
+ +* `#27935 `__: MAINT: Prepare 2.2.x for further development +* `#27950 `__: TEST: cleanups [skip cirrus][skip azp] +* `#27958 `__: BUG: fix use-after-free error in npy_hashtable.cpp (#27955) +* `#27959 `__: BLD: add missing include +* `#27982 `__: BUG:fix compile error libatomic link test to meson.build +* `#27990 `__: TYP: Fix falsely rejected value types in ``ndarray.__setitem__`` +* `#27991 `__: MAINT: Don't wrap ``#include `` with ``extern "C"`` +* `#27993 `__: BUG: Fix segfault in stringdtype lexsort +* `#28006 `__: MAINT: random: Tweak module code in mtrand.pyx to fix a Cython... +* `#28007 `__: BUG: Cython API was missing NPY_UINTP. +* `#28021 `__: CI: pin scipy-doctest to 1.5.1 +* `#28044 `__: TYP: allow ``None`` in operand sequence of nditer -Highlights -========== - -*We'll choose highlights for this release near the end of the release cycle.* - - -.. if release snippets have been incorporated already, uncomment the follow - line (leave the `.. include:: directive) - -.. **Content from release note snippets in doc/release/upcoming_changes:** - -.. include:: notes-towncrier.rst From 32f5e3a3e7365c7196166529830ac732b16440e9 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 21 Dec 2024 16:09:51 -0700 Subject: [PATCH 039/187] MAINT: Prepare 2.2.x for further development --- doc/source/release.rst | 1 + doc/source/release/2.2.2-notes.rst | 19 +++++++++++++++++++ pavement.py | 2 +- pyproject.toml | 2 +- 4 files changed, 22 insertions(+), 2 deletions(-) create mode 100644 doc/source/release/2.2.2-notes.rst diff --git a/doc/source/release.rst b/doc/source/release.rst index 6e9ab8a22cc0..4c83cd3d1ae4 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,7 @@ Release notes .. 
toctree:: :maxdepth: 2 + 2.2.2 2.2.1 2.2.0 2.1.3 diff --git a/doc/source/release/2.2.2-notes.rst b/doc/source/release/2.2.2-notes.rst new file mode 100644 index 000000000000..271a16f4c32b --- /dev/null +++ b/doc/source/release/2.2.2-notes.rst @@ -0,0 +1,19 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.2.2 Release Notes +========================== + + +Highlights +========== + +*We'll choose highlights for this release near the end of the release cycle.* + + +.. if release snippets have been incorporated already, uncomment the follow + line (leave the `.. include:: directive) + +.. **Content from release note snippets in doc/release/upcoming_changes:** + +.. include:: notes-towncrier.rst diff --git a/pavement.py b/pavement.py index 2d300fe35a5d..5b2e5578b08b 100644 --- a/pavement.py +++ b/pavement.py @@ -36,7 +36,7 @@ #----------------------------------- # Path to the release notes -RELEASE_NOTES = 'doc/source/release/2.2.1-notes.rst' +RELEASE_NOTES = 'doc/source/release/2.2.2-notes.rst' #------------------------------------------------------- diff --git a/pyproject.toml b/pyproject.toml index 85dd7c088c53..7c6b19d91fe5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.2.1" +version = "2.2.2" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"} From 06cb2aa51aa6f38d413e9ff95d6d09b276ecd6d3 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 22 Dec 2024 18:22:59 +0100 Subject: [PATCH 040/187] TYP: fix ``void`` arrays not accepting ``str`` keys in ``__setitem__`` --- numpy/__init__.pyi | 2 ++ numpy/typing/tests/data/pass/ndarray_misc.py | 7 +++++++ 2 files changed, 9 insertions(+) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 4972f3bf209c..a14234db2735 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2133,6 +2133,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): value: 
_ConvertibleToDT64 | _NestedSequence[_ConvertibleToDT64], /, ) -> None: ... + @overload # void + def __setitem__(self: NDArray[void], key: str | list[str], value: object, /) -> None: ... @overload # catch-all def __setitem__(self, key: _ToIndices, value: ArrayLike, /) -> None: ... diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py index 7b8ebea52a16..fef9d519b78b 100644 --- a/numpy/typing/tests/data/pass/ndarray_misc.py +++ b/numpy/typing/tests/data/pass/ndarray_misc.py @@ -174,3 +174,10 @@ class SubClass(npt.NDArray[np.float64]): ... complex(np.array(1.0, dtype=np.float64)) operator.index(np.array(1, dtype=np.int64)) + +# this fails on numpy 2.2.1 +# https://github.com/scipy/scipy/blob/a755ee77ec47a64849abe42c349936475a6c2f24/scipy/io/arff/tests/test_arffread.py#L41-L44 +A_float = np.array([[1, 5], [2, 4], [np.nan, np.nan]]) +A_void: npt.NDArray[np.void] = np.empty(3, [("yop", float), ("yap", float)]) +A_void["yop"] = A_float[:, 0] +A_void["yap"] = A_float[:, 1] From f367808ee3ba0b88678d69b6db3ff374ec74b6ed Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Wed, 25 Dec 2024 18:03:59 +0100 Subject: [PATCH 041/187] TYP: fix unnecessarily broad `integer` binop return types (#28065) * TYP: fix unnecessarily broad `integer` binop return types * MAINT: Fix linter complaint. --------- Co-authored-by: Charles Harris --- numpy/_typing/_callable.pyi | 32 ++++++------------- numpy/typing/tests/data/reveal/arithmetic.pyi | 24 +++++++------- 2 files changed, 22 insertions(+), 34 deletions(-) diff --git a/numpy/_typing/_callable.pyi b/numpy/_typing/_callable.pyi index 56e24fb73911..75af1ae8efba 100644 --- a/numpy/_typing/_callable.pyi +++ b/numpy/_typing/_callable.pyi @@ -151,19 +151,15 @@ class _IntTrueDiv(Protocol[_NBit1]): class _UnsignedIntOp(Protocol[_NBit1]): # NOTE: `uint64 + signedinteger -> float64` @overload - def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ... 
+ def __call__(self, other: int, /) -> unsignedinteger[_NBit1]: ... @overload - def __call__(self, other: int | signedinteger[Any], /) -> Any: ... + def __call__(self, other: float, /) -> float64: ... @overload - def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... + def __call__(self, other: complex, /) -> complex128: ... @overload - def __call__( - self, other: complex, / - ) -> complexfloating[_NBit1, _NBit1] | complex128: ... + def __call__(self, other: unsignedinteger[_NBit2], /) -> unsignedinteger[_NBit1] | unsignedinteger[_NBit2]: ... @overload - def __call__( - self, other: unsignedinteger[_NBit2], / - ) -> unsignedinteger[_NBit1] | unsignedinteger[_NBit2]: ... + def __call__(self, other: signedinteger, /) -> Any: ... @type_check_only class _UnsignedIntBitOp(Protocol[_NBit1]): @@ -207,19 +203,13 @@ class _UnsignedIntDivMod(Protocol[_NBit1]): @type_check_only class _SignedIntOp(Protocol[_NBit1]): @overload - def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ... - @overload - def __call__(self, other: int, /) -> signedinteger[_NBit1] | int_: ... + def __call__(self, other: int, /) -> signedinteger[_NBit1]: ... @overload - def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... + def __call__(self, other: float, /) -> float64: ... @overload - def __call__( - self, other: complex, / - ) -> complexfloating[_NBit1, _NBit1] | complex128: ... + def __call__(self, other: complex, /) -> complex128: ... @overload - def __call__( - self, other: signedinteger[_NBit2], / - ) -> signedinteger[_NBit1] | signedinteger[_NBit2]: ... + def __call__(self, other: signedinteger[_NBit2], /) -> signedinteger[_NBit1] | signedinteger[_NBit2]: ... @type_check_only class _SignedIntBitOp(Protocol[_NBit1]): @@ -261,9 +251,7 @@ class _SignedIntDivMod(Protocol[_NBit1]): @type_check_only class _FloatOp(Protocol[_NBit1]): @overload - def __call__(self, other: bool, /) -> floating[_NBit1]: ... 
- @overload - def __call__(self, other: int, /) -> floating[_NBit1] | floating[_NBitInt]: ... + def __call__(self, other: int, /) -> floating[_NBit1]: ... @overload def __call__(self, other: float, /) -> floating[_NBit1] | float64: ... @overload diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index c1eee5d3fc29..d89dd529ebe4 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -412,10 +412,10 @@ assert_type(c16 + AR_f, npt.NDArray[np.complexfloating[Any, Any]]) assert_type(f16 + c16, np.complex128 | np.complexfloating[_128Bit, _128Bit]) assert_type(c16 + c16, np.complex128) assert_type(f8 + c16, np.complex128) -assert_type(i8 + c16, np.complexfloating[_64Bit, _64Bit]) +assert_type(i8 + c16, np.complex128) assert_type(c8 + c16, np.complex128 | np.complex64) assert_type(f4 + c16, np.complex128 | np.complex64) -assert_type(i4 + c16, np.complex128 | np.complex64) +assert_type(i4 + c16, np.complex128) assert_type(b_ + c16, np.complex128) assert_type(b + c16, np.complex128) assert_type(c + c16, np.complex128) @@ -463,9 +463,9 @@ assert_type(f8 + AR_f, npt.NDArray[np.floating[Any]]) assert_type(f16 + f8, np.floating[_128Bit] | np.float64) assert_type(f8 + f8, np.float64) -assert_type(i8 + f8, np.floating[_64Bit]) +assert_type(i8 + f8, np.float64) assert_type(f4 + f8, np.floating[_32Bit] | np.float64) -assert_type(i4 + f8, np.floating[_32Bit] | np.float64) +assert_type(i4 + f8,np.float64) assert_type(b_ + f8, np.float64) assert_type(b + f8, np.float64) assert_type(c + f8, np.complex128 | np.float64) @@ -502,8 +502,8 @@ assert_type(i8 + i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) assert_type(i8 + u4, Any) assert_type(i8 + b_, np.int64) assert_type(i8 + b, np.int64) -assert_type(i8 + c, np.complexfloating[_64Bit, _64Bit]) -assert_type(i8 + f, np.floating[_64Bit]) +assert_type(i8 + c, np.complex128) +assert_type(i8 + f, np.float64) 
assert_type(i8 + AR_f, npt.NDArray[np.floating[Any]]) assert_type(u8 + u8, np.uint64) @@ -511,8 +511,8 @@ assert_type(u8 + i4, Any) assert_type(u8 + u4, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) assert_type(u8 + b_, np.uint64) assert_type(u8 + b, np.uint64) -assert_type(u8 + c, np.complexfloating[_64Bit, _64Bit]) -assert_type(u8 + f, np.floating[_64Bit]) +assert_type(u8 + c, np.complex128) +assert_type(u8 + f, np.float64) assert_type(u8 + AR_f, npt.NDArray[np.floating[Any]]) assert_type(i8 + i8, np.int64) @@ -521,8 +521,8 @@ assert_type(i4 + i8, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) assert_type(u4 + i8, Any) assert_type(b_ + i8, np.int64) assert_type(b + i8, np.int64) -assert_type(c + i8, np.complexfloating[_64Bit, _64Bit]) -assert_type(f + i8, np.floating[_64Bit]) +assert_type(c + i8, np.complex128) +assert_type(f + i8, np.float64) assert_type(AR_f + i8, npt.NDArray[np.floating[Any]]) assert_type(u8 + u8, np.uint64) @@ -530,8 +530,8 @@ assert_type(i4 + u8, Any) assert_type(u4 + u8, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) assert_type(b_ + u8, np.uint64) assert_type(b + u8, np.uint64) -assert_type(c + u8, np.complexfloating[_64Bit, _64Bit]) -assert_type(f + u8, np.floating[_64Bit]) +assert_type(c + u8, np.complex128) +assert_type(f + u8, np.float64) assert_type(AR_f + u8, npt.NDArray[np.floating[Any]]) assert_type(i4 + i8, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) From dfe61e9fa5207623d2203af682c719e83c3eb2a2 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 6 Jan 2025 18:04:12 +0100 Subject: [PATCH 042/187] TYP: Better ``ndarray`` binop return types for ``float64`` & ``complex128`` --- numpy/__init__.pyi | 491 +++++++++++++----- numpy/_typing/__init__.py | 8 +- numpy/_typing/_array_like.py | 6 + numpy/typing/tests/data/reveal/arithmetic.pyi | 116 +++-- .../tests/data/reveal/false_positives.pyi | 14 - numpy/typing/tests/data/reveal/mod.pyi | 16 +- 6 files changed, 438 insertions(+), 213 deletions(-) delete 
mode 100644 numpy/typing/tests/data/reveal/false_positives.pyi diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index a14234db2735..395f13565208 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -23,11 +23,14 @@ from numpy._typing import ( _SupportsArray, _NestedSequence, _FiniteNestedSequence, + _ArrayLike, _ArrayLikeBool_co, _ArrayLikeUInt_co, _ArrayLikeInt, _ArrayLikeInt_co, + _ArrayLikeFloat64_co, _ArrayLikeFloat_co, + _ArrayLikeComplex128_co, _ArrayLikeComplex_co, _ArrayLikeNumber_co, _ArrayLikeTD64_co, @@ -800,6 +803,7 @@ _1NShapeT = TypeVar("_1NShapeT", bound=tuple[L[1], Unpack[tuple[L[1], ...]]]) # _SCT = TypeVar("_SCT", bound=generic) _SCT_co = TypeVar("_SCT_co", bound=generic, covariant=True) _NumberT = TypeVar("_NumberT", bound=number[Any]) +_RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) _FloatingT_co = TypeVar("_FloatingT_co", bound=floating[Any], default=floating[Any], covariant=True) _IntegerT = TypeVar("_IntegerT", bound=integer) _IntegerT_co = TypeVar("_IntegerT_co", bound=integer[Any], default=integer[Any], covariant=True) @@ -833,14 +837,16 @@ _1D: TypeAlias = tuple[int] _2D: TypeAlias = tuple[int, int] _2Tuple: TypeAlias = tuple[_T, _T] -_ArrayUInt_co: TypeAlias = NDArray[np.bool | unsignedinteger[Any]] -_ArrayInt_co: TypeAlias = NDArray[np.bool | integer[Any]] -_ArrayFloat_co: TypeAlias = NDArray[np.bool | integer[Any] | floating[Any]] -_ArrayComplex_co: TypeAlias = NDArray[np.bool | integer[Any] | floating[Any] | complexfloating[Any, Any]] -_ArrayNumber_co: TypeAlias = NDArray[np.bool | number[Any]] -_ArrayTD64_co: TypeAlias = NDArray[np.bool | integer[Any] | timedelta64] +_ArrayUInt_co: TypeAlias = NDArray[unsignedinteger | np.bool] +_ArrayInt_co: TypeAlias = NDArray[integer | np.bool] +_ArrayFloat64_co: TypeAlias = NDArray[floating[_64Bit] | float32 | float16 | integer | np.bool] +_ArrayFloat_co: TypeAlias = NDArray[floating | integer | np.bool] +_ArrayComplex128_co: TypeAlias = NDArray[number[_64Bit] | 
number[_32Bit] | float16 | integer | np.bool] +_ArrayComplex_co: TypeAlias = NDArray[inexact | integer | np.bool] +_ArrayNumber_co: TypeAlias = NDArray[number | np.bool] +_ArrayTD64_co: TypeAlias = NDArray[timedelta64 | integer | np.bool] -_Float64_co: TypeAlias = float | floating[_64Bit] | float32 | float16 | integer[Any] | np.bool +_Float64_co: TypeAlias = float | floating[_64Bit] | float32 | float16 | integer | np.bool _Complex64_co: TypeAlias = number[_32Bit] | number[_16Bit] | number[_8Bit] | builtins.bool | np.bool _Complex128_co: TypeAlias = complex | number[_64Bit] | _Complex64_co @@ -2617,111 +2623,192 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): ) -> ndarray[_ShapeT, dtype[floating[_AnyNBitInexact]]]: ... @overload def __abs__(self: _RealArrayT, /) -> _RealArrayT: ... + def __invert__(self: _IntegralArrayT, /) -> _IntegralArrayT: ... # noqa: PYI019 def __neg__(self: _NumericArrayT, /) -> _NumericArrayT: ... # noqa: PYI019 def __pos__(self: _NumericArrayT, /) -> _NumericArrayT: ... # noqa: PYI019 # Binary ops + + # TODO: Support the "1d @ 1d -> scalar" case + @overload + def __matmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + @overload + def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __matmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __matmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __matmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... 
@overload - def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __matmul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __matmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... + def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __matmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... + @overload + def __matmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... @overload def __matmul__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + @overload # signature equivalent to __matmul__ + def __rmatmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + @overload + def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] @overload - def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... 
# type: ignore[misc] + def __rmatmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rmatmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rmatmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rmatmul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __rmatmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[overload-overlap] + @overload + def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[overload-overlap] + @overload + def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[overload-overlap] @overload def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... @overload - def __rmatmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __rmatmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... @overload def __rmatmul__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
@overload - def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __mod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + @overload + def __mod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __mod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __mod__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __mod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __mod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[timedelta64]: ... + def __mod__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... @overload def __mod__(self: NDArray[object_], other: Any, /) -> Any: ... 
@overload def __mod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + @overload # signature equivalent to __mod__ + def __rmod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... @overload - def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __rmod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] @overload - def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] @overload - def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rmod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] @overload - def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rmod__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __rmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[timedelta64]: ... + def __rmod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... 
+ @overload + def __rmod__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... @overload def __rmod__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __rmod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __divmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc] + def __divmod__(self: NDArray[_RealNumberT], rhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ... + @overload + def __divmod__(self: NDArray[_RealNumberT], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + @overload + def __divmod__(self: NDArray[np.bool], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... # type: ignore[overload-overlap] + @overload + def __divmod__(self: NDArray[np.bool], rhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] + @overload + def __divmod__(self: NDArray[floating[_64Bit]], rhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... + @overload + def __divmod__(self: _ArrayFloat64_co, rhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ... @overload - def __divmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc] + def __divmod__(self: _ArrayUInt_co, rhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... # type: ignore[overload-overlap] @overload - def __divmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger[Any]]]: ... # type: ignore[misc] + def __divmod__(self: _ArrayInt_co, rhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... # type: ignore[overload-overlap] @overload - def __divmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating[Any]]]: ... 
# type: ignore[misc] + def __divmod__(self: _ArrayFloat_co, rhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ... @overload - def __divmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + def __divmod__(self: NDArray[timedelta64], rhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + @overload # signature equivalent to __divmod__ + def __rdivmod__(self: NDArray[_RealNumberT], lhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ... @overload - def __rdivmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc] + def __rdivmod__(self: NDArray[_RealNumberT], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] @overload - def __rdivmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc] + def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... # type: ignore[overload-overlap] @overload - def __rdivmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger[Any]]]: ... # type: ignore[misc] + def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] @overload - def __rdivmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc] + def __rdivmod__(self: NDArray[floating[_64Bit]], lhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... @overload - def __rdivmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... 
+ def __rdivmod__(self: _ArrayFloat64_co, lhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ... + @overload + def __rdivmod__(self: _ArrayUInt_co, lhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... # type: ignore[overload-overlap] + @overload + def __rdivmod__(self: _ArrayInt_co, lhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... # type: ignore[overload-overlap] + @overload + def __rdivmod__(self: _ArrayFloat_co, lhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ... + @overload + def __rdivmod__(self: NDArray[timedelta64], lhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... @overload - def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __add__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __add__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __add__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __add__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... 
@overload - def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __add__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __add__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... # type: ignore[misc] + def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload def __add__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... @overload @@ -2731,20 +2818,34 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __add__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + @overload # signature equivalent to __add__ + def __radd__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __radd__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... 
# type: ignore[overload-overlap] + @overload + def __radd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __radd__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __radd__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __radd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __radd__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] @overload - def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... 
# type: ignore[overload-overlap] @overload - def __radd__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __radd__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] @overload - def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... # type: ignore[misc] + def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload def __radd__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... @overload @@ -2754,20 +2855,34 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + @overload + def __sub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __sub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload def __sub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __sub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __sub__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __sub__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... 
# type: ignore[misc] + def __sub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __sub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... # type: ignore[misc] + def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload def __sub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... @overload @@ -2777,22 +2892,36 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + @overload + def __rsub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rsub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload def __rsub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... 
@overload - def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rsub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __rsub__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __rsub__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __rsub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] @overload - def __rsub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] @overload - def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... 
# type: ignore[misc] + def __rsub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] @overload - def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... # type: ignore[misc] + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + @overload + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... @overload def __rsub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[timedelta64]: ... @overload @@ -2801,156 +2930,252 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __mul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __mul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __mul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __mul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... 
# type: ignore[misc] + def __mul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __mul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __mul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __mul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __mul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __mul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + @overload + def __mul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... @overload def __mul__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + @overload # signature equivalent to __mul__ + def __rmul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... 
# type: ignore[overload-overlap] + @overload + def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + def __rmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rmul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __rmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... 
# type: ignore[overload-overlap] @overload - def __rmul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] @overload - def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + def __rmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __rmul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + @overload + def __rmul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... @overload def __rmul__(self: NDArray[object_], other: Any, /) -> Any: ... @overload def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __truediv__(self: _ArrayInt_co, other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __truediv__(self: _ArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __truediv__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __truediv__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... 
@overload - def __floordiv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[int64]: ... + def __truediv__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + def __truediv__(self: _ArrayFloat_co, other: _ArrayLike[floating], /) -> NDArray[floating]: ... @overload - def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + def __truediv__(self: NDArray[complexfloating], other: _ArrayLikeNumber_co, /) -> NDArray[complexfloating]: ... @overload - def __floordiv__(self: NDArray[object_], other: Any, /) -> Any: ... + def __truediv__(self: _ArrayNumber_co, other: _ArrayLike[complexfloating], /) -> NDArray[complexfloating]: ... @overload - def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __truediv__(self: NDArray[inexact], other: _ArrayLikeNumber_co, /) -> NDArray[inexact]: ... + @overload + def __truediv__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __truediv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[float64]: ... + @overload + def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + @overload + def __truediv__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __rtruediv__(self: _ArrayInt_co, other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... 
@overload - def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rtruediv__(self: _ArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rtruediv__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rtruediv__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __rfloordiv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[int64]: ... + def __rtruediv__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... @overload - def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeTD64_co, /) -> NoReturn: ... + def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLike[floating], /) -> NDArray[floating]: ... @overload - def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + def __rtruediv__(self: NDArray[complexfloating], other: _ArrayLikeNumber_co, /) -> NDArray[complexfloating]: ... @overload - def __rfloordiv__(self: NDArray[object_], other: Any, /) -> Any: ... + def __rtruediv__(self: _ArrayNumber_co, other: _ArrayLike[complexfloating], /) -> NDArray[complexfloating]: ... @overload - def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __rtruediv__(self: NDArray[inexact], other: _ArrayLikeNumber_co, /) -> NDArray[inexact]: ... + @overload + def __rtruediv__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... 
+ @overload + def __rtruediv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[float64]: ... + @overload + def __rtruediv__(self: NDArray[integer | floating], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... + @overload + def __rtruediv__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __floordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... @overload - def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __floordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __floordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... + def __floordiv__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __pow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __floordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __pow__(self: NDArray[object_], other: Any, /) -> Any: ... 
+ def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... + @overload + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[int64]: ... + @overload + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + @overload + def __floordiv__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + def __rfloordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... @overload - def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rfloordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... 
# type: ignore[overload-overlap] @overload - def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... + def __rfloordiv__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __rpow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __rfloordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rpow__(self: NDArray[object_], other: Any, /) -> Any: ... + def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[int64]: ... + @overload + def __rfloordiv__(self: NDArray[floating | integer], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... + @overload + def __rfloordiv__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __truediv__(self: _ArrayInt_co, other: _ArrayInt_co, /) -> NDArray[float64]: ... # type: ignore[misc] + def __pow__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __truediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __pow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... 
# type: ignore[overload-overlap] @overload - def __truediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] @overload - def __truediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __pow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __truediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[float64]: ... + def __pow__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + def __pow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + def __pow__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __truediv__(self: NDArray[object_], other: Any, /) -> Any: ... + def __pow__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... 
# type: ignore[overload-overlap] + @overload + def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... + @overload + def __pow__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __pow__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rtruediv__(self: _ArrayInt_co, other: _ArrayInt_co, /) -> NDArray[float64]: ... # type: ignore[misc] + def __rpow__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rpow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __rtruediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] @overload - def __rtruediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... + def __rpow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __rtruediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[float64]: ... + def __rpow__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload - def __rtruediv__(self: NDArray[np.bool], other: _ArrayLikeTD64_co, /) -> NoReturn: ... + def __rpow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... 
@overload - def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + def __rpow__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload - def __rtruediv__(self: NDArray[object_], other: Any, /) -> Any: ... + def __rpow__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload - def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... + @overload + def __rpow__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __rpow__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... 
# type: ignore[misc] diff --git a/numpy/_typing/__init__.py b/numpy/_typing/__init__.py index 687e124ec2bb..dd9b133ddf88 100644 --- a/numpy/_typing/__init__.py +++ b/numpy/_typing/__init__.py @@ -121,15 +121,14 @@ NDArray as NDArray, ArrayLike as ArrayLike, _ArrayLike as _ArrayLike, - _FiniteNestedSequence as _FiniteNestedSequence, - _SupportsArray as _SupportsArray, - _SupportsArrayFunc as _SupportsArrayFunc, _ArrayLikeInt as _ArrayLikeInt, _ArrayLikeBool_co as _ArrayLikeBool_co, _ArrayLikeUInt_co as _ArrayLikeUInt_co, _ArrayLikeInt_co as _ArrayLikeInt_co, _ArrayLikeFloat_co as _ArrayLikeFloat_co, + _ArrayLikeFloat64_co as _ArrayLikeFloat64_co, _ArrayLikeComplex_co as _ArrayLikeComplex_co, + _ArrayLikeComplex128_co as _ArrayLikeComplex128_co, _ArrayLikeNumber_co as _ArrayLikeNumber_co, _ArrayLikeTD64_co as _ArrayLikeTD64_co, _ArrayLikeDT64_co as _ArrayLikeDT64_co, @@ -140,6 +139,9 @@ _ArrayLikeString_co as _ArrayLikeString_co, _ArrayLikeAnyString_co as _ArrayLikeAnyString_co, _ArrayLikeUnknown as _ArrayLikeUnknown, + _FiniteNestedSequence as _FiniteNestedSequence, + _SupportsArray as _SupportsArray, + _SupportsArrayFunc as _SupportsArrayFunc, _UnknownType as _UnknownType, ) diff --git a/numpy/_typing/_array_like.py b/numpy/_typing/_array_like.py index 27b59b75373a..292b99d918e2 100644 --- a/numpy/_typing/_array_like.py +++ b/numpy/_typing/_array_like.py @@ -21,6 +21,7 @@ str_, bytes_, ) +from ._nbit_base import _32Bit, _64Bit from ._nested_sequence import _NestedSequence from ._shape import _Shape @@ -165,6 +166,11 @@ def __array_function__( _ArrayLikeString_co ) +__Float64_co: TypeAlias = np.floating[_64Bit] | np.float32 | np.float16 | np.integer | np.bool +__Complex128_co: TypeAlias = np.number[_64Bit] | np.number[_32Bit] | np.float16 | np.integer | np.bool +_ArrayLikeFloat64_co: TypeAlias = _DualArrayLike[dtype[__Float64_co], float | int] +_ArrayLikeComplex128_co: TypeAlias = _DualArrayLike[dtype[__Complex128_co], complex | float | int] + # NOTE: This includes 
`builtins.bool`, but not `numpy.bool`. _ArrayLikeInt: TypeAlias = _DualArrayLike[ dtype[integer[Any]], diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index d89dd529ebe4..46ac003508c4 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -51,6 +51,7 @@ AR_m: npt.NDArray[np.timedelta64] AR_M: npt.NDArray[np.datetime64] AR_O: npt.NDArray[np.object_] AR_number: npt.NDArray[np.number[Any]] +AR_Any: npt.NDArray[Any] AR_LIKE_b: list[bool] AR_LIKE_u: list[np.uint32] @@ -61,18 +62,19 @@ AR_LIKE_m: list[np.timedelta64] AR_LIKE_M: list[np.datetime64] AR_LIKE_O: list[np.object_] + # Array subtraction assert_type(AR_number - AR_number, npt.NDArray[np.number[Any]]) -assert_type(AR_b - AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_b - AR_LIKE_u, npt.NDArray[np.uint32]) assert_type(AR_b - AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_b - AR_LIKE_f, npt.NDArray[np.floating[Any]]) assert_type(AR_b - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) assert_type(AR_b - AR_LIKE_m, npt.NDArray[np.timedelta64]) assert_type(AR_b - AR_LIKE_O, Any) -assert_type(AR_LIKE_u - AR_b, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_LIKE_u - AR_b, npt.NDArray[np.uint32]) assert_type(AR_LIKE_i - AR_b, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_LIKE_f - AR_b, npt.NDArray[np.floating[Any]]) assert_type(AR_LIKE_c - AR_b, npt.NDArray[np.complexfloating[Any, Any]]) @@ -80,7 +82,7 @@ assert_type(AR_LIKE_m - AR_b, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_M - AR_b, npt.NDArray[np.datetime64]) assert_type(AR_LIKE_O - AR_b, Any) -assert_type(AR_u - AR_LIKE_b, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_u - AR_LIKE_b, npt.NDArray[np.uint32]) assert_type(AR_u - AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]]) assert_type(AR_u - AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_u - AR_LIKE_f, 
npt.NDArray[np.floating[Any]]) @@ -88,7 +90,7 @@ assert_type(AR_u - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) assert_type(AR_u - AR_LIKE_m, npt.NDArray[np.timedelta64]) assert_type(AR_u - AR_LIKE_O, Any) -assert_type(AR_LIKE_b - AR_u, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_LIKE_b - AR_u, npt.NDArray[np.uint32]) assert_type(AR_LIKE_u - AR_u, npt.NDArray[np.unsignedinteger[Any]]) assert_type(AR_LIKE_i - AR_u, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_LIKE_f - AR_u, npt.NDArray[np.floating[Any]]) @@ -97,7 +99,7 @@ assert_type(AR_LIKE_m - AR_u, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_M - AR_u, npt.NDArray[np.datetime64]) assert_type(AR_LIKE_O - AR_u, Any) -assert_type(AR_i - AR_LIKE_b, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_i - AR_LIKE_b, npt.NDArray[np.int64]) assert_type(AR_i - AR_LIKE_u, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_i - AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_i - AR_LIKE_f, npt.NDArray[np.floating[Any]]) @@ -105,7 +107,7 @@ assert_type(AR_i - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) assert_type(AR_i - AR_LIKE_m, npt.NDArray[np.timedelta64]) assert_type(AR_i - AR_LIKE_O, Any) -assert_type(AR_LIKE_b - AR_i, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_LIKE_b - AR_i, npt.NDArray[np.int64]) assert_type(AR_LIKE_u - AR_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_LIKE_i - AR_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_LIKE_f - AR_i, npt.NDArray[np.floating[Any]]) @@ -114,32 +116,32 @@ assert_type(AR_LIKE_m - AR_i, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_M - AR_i, npt.NDArray[np.datetime64]) assert_type(AR_LIKE_O - AR_i, Any) -assert_type(AR_f - AR_LIKE_b, npt.NDArray[np.floating[Any]]) -assert_type(AR_f - AR_LIKE_u, npt.NDArray[np.floating[Any]]) -assert_type(AR_f - AR_LIKE_i, npt.NDArray[np.floating[Any]]) -assert_type(AR_f - AR_LIKE_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_f - AR_LIKE_b, npt.NDArray[np.float64]) 
+assert_type(AR_f - AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_f - AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_f - AR_LIKE_f, npt.NDArray[np.float64]) assert_type(AR_f - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) assert_type(AR_f - AR_LIKE_O, Any) -assert_type(AR_LIKE_b - AR_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_u - AR_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_i - AR_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_f - AR_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_b - AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u - AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i - AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f - AR_f, npt.NDArray[np.float64]) assert_type(AR_LIKE_c - AR_f, npt.NDArray[np.complexfloating[Any, Any]]) assert_type(AR_LIKE_O - AR_f, Any) -assert_type(AR_c - AR_LIKE_b, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_c - AR_LIKE_u, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_c - AR_LIKE_i, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_c - AR_LIKE_f, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_c - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_c - AR_LIKE_b, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_u, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_i, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_f, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_c, npt.NDArray[np.complex128]) assert_type(AR_c - AR_LIKE_O, Any) -assert_type(AR_LIKE_b - AR_c, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_LIKE_u - AR_c, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_LIKE_i - AR_c, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_LIKE_f - AR_c, npt.NDArray[np.complexfloating[Any, Any]]) -assert_type(AR_LIKE_c - AR_c, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_LIKE_b - AR_c, npt.NDArray[np.complex128]) 
+assert_type(AR_LIKE_u - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_i - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_f - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_c - AR_c, npt.NDArray[np.complex128]) assert_type(AR_LIKE_O - AR_c, Any) assert_type(AR_m - AR_LIKE_b, npt.NDArray[np.timedelta64]) @@ -186,53 +188,53 @@ assert_type(AR_LIKE_O - AR_O, Any) # Array floor division assert_type(AR_b // AR_LIKE_b, npt.NDArray[np.int8]) -assert_type(AR_b // AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_b // AR_LIKE_u, npt.NDArray[np.uint32]) assert_type(AR_b // AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_b // AR_LIKE_f, npt.NDArray[np.floating[Any]]) assert_type(AR_b // AR_LIKE_O, Any) assert_type(AR_LIKE_b // AR_b, npt.NDArray[np.int8]) -assert_type(AR_LIKE_u // AR_b, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_LIKE_u // AR_b, npt.NDArray[np.uint32]) assert_type(AR_LIKE_i // AR_b, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_LIKE_f // AR_b, npt.NDArray[np.floating[Any]]) assert_type(AR_LIKE_O // AR_b, Any) -assert_type(AR_u // AR_LIKE_b, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_u // AR_LIKE_b, npt.NDArray[np.uint32]) assert_type(AR_u // AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]]) assert_type(AR_u // AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_u // AR_LIKE_f, npt.NDArray[np.floating[Any]]) assert_type(AR_u // AR_LIKE_O, Any) -assert_type(AR_LIKE_b // AR_u, npt.NDArray[np.unsignedinteger[Any]]) +assert_type(AR_LIKE_b // AR_u, npt.NDArray[np.uint32]) assert_type(AR_LIKE_u // AR_u, npt.NDArray[np.unsignedinteger[Any]]) assert_type(AR_LIKE_i // AR_u, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_LIKE_f // AR_u, npt.NDArray[np.floating[Any]]) assert_type(AR_LIKE_m // AR_u, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_O // AR_u, Any) -assert_type(AR_i // AR_LIKE_b, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_i // AR_LIKE_b, 
npt.NDArray[np.int64]) assert_type(AR_i // AR_LIKE_u, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_i // AR_LIKE_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_i // AR_LIKE_f, npt.NDArray[np.floating[Any]]) assert_type(AR_i // AR_LIKE_O, Any) -assert_type(AR_LIKE_b // AR_i, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_LIKE_b // AR_i, npt.NDArray[np.int64]) assert_type(AR_LIKE_u // AR_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_LIKE_i // AR_i, npt.NDArray[np.signedinteger[Any]]) assert_type(AR_LIKE_f // AR_i, npt.NDArray[np.floating[Any]]) assert_type(AR_LIKE_m // AR_i, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_O // AR_i, Any) -assert_type(AR_f // AR_LIKE_b, npt.NDArray[np.floating[Any]]) -assert_type(AR_f // AR_LIKE_u, npt.NDArray[np.floating[Any]]) -assert_type(AR_f // AR_LIKE_i, npt.NDArray[np.floating[Any]]) -assert_type(AR_f // AR_LIKE_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_f // AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_f // AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_f // AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_f // AR_LIKE_f, npt.NDArray[np.float64]) assert_type(AR_f // AR_LIKE_O, Any) -assert_type(AR_LIKE_b // AR_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_u // AR_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_i // AR_f, npt.NDArray[np.floating[Any]]) -assert_type(AR_LIKE_f // AR_f, npt.NDArray[np.floating[Any]]) +assert_type(AR_LIKE_b // AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u // AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i // AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f // AR_f, npt.NDArray[np.float64]) assert_type(AR_LIKE_m // AR_f, npt.NDArray[np.timedelta64]) assert_type(AR_LIKE_O // AR_f, Any) @@ -407,7 +409,7 @@ assert_type(c16 + b_, np.complex128) assert_type(c16 + b, np.complex128) assert_type(c16 + c, np.complex128) assert_type(c16 + f, np.complex128) -assert_type(c16 + AR_f, npt.NDArray[np.complexfloating[Any, Any]]) 
+assert_type(c16 + AR_f, npt.NDArray[np.complex128]) assert_type(f16 + c16, np.complex128 | np.complexfloating[_128Bit, _128Bit]) assert_type(c16 + c16, np.complex128) @@ -420,7 +422,7 @@ assert_type(b_ + c16, np.complex128) assert_type(b + c16, np.complex128) assert_type(c + c16, np.complex128) assert_type(f + c16, np.complex128) -assert_type(AR_f + c16, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_f + c16, npt.NDArray[np.complex128]) assert_type(c8 + f16, np.complexfloating[_32Bit, _32Bit] | np.complexfloating[_128Bit, _128Bit]) assert_type(c8 + c16, np.complex64 | np.complex128) @@ -433,7 +435,7 @@ assert_type(c8 + b_, np.complex64) assert_type(c8 + b, np.complex64) assert_type(c8 + c, np.complex64 | np.complex128) assert_type(c8 + f, np.complex64 | np.complex128) -assert_type(c8 + AR_f, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(c8 + AR_f, npt.NDArray[np.complexfloating]) assert_type(f16 + c8, np.complexfloating[_128Bit, _128Bit] | np.complex64) assert_type(c16 + c8, np.complex128) @@ -446,7 +448,7 @@ assert_type(b_ + c8, np.complex64) assert_type(b + c8, np.complex64) assert_type(c + c8, np.complex64 | np.complex128) assert_type(f + c8, np.complex64 | np.complex128) -assert_type(AR_f + c8, npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(AR_f + c8, npt.NDArray[np.complexfloating]) # Float @@ -459,18 +461,18 @@ assert_type(f8 + b_, np.float64) assert_type(f8 + b, np.float64) assert_type(f8 + c, np.float64 | np.complex128) assert_type(f8 + f, np.float64) -assert_type(f8 + AR_f, npt.NDArray[np.floating[Any]]) +assert_type(f8 + AR_f, npt.NDArray[np.float64]) assert_type(f16 + f8, np.floating[_128Bit] | np.float64) assert_type(f8 + f8, np.float64) assert_type(i8 + f8, np.float64) -assert_type(f4 + f8, np.floating[_32Bit] | np.float64) +assert_type(f4 + f8, np.float32 | np.float64) assert_type(i4 + f8,np.float64) assert_type(b_ + f8, np.float64) assert_type(b + f8, np.float64) assert_type(c + f8, np.complex128 | np.float64) 
assert_type(f + f8, np.float64) -assert_type(AR_f + f8, npt.NDArray[np.floating[Any]]) +assert_type(AR_f + f8, npt.NDArray[np.float64]) assert_type(f4 + f16, np.float32 | np.floating[_128Bit]) assert_type(f4 + f8, np.float32 | np.float64) @@ -481,7 +483,7 @@ assert_type(f4 + b_, np.float32) assert_type(f4 + b, np.float32) assert_type(f4 + c, np.complex64 | np.complex128) assert_type(f4 + f, np.float32 | np.float64) -assert_type(f4 + AR_f, npt.NDArray[np.floating[Any]]) +assert_type(f4 + AR_f, npt.NDArray[np.float64]) assert_type(f16 + f4, np.floating[_128Bit] | np.float32) assert_type(f8 + f4, np.float64) @@ -492,7 +494,7 @@ assert_type(b_ + f4, np.float32) assert_type(b + f4, np.float32) assert_type(c + f4, np.complex64 | np.complex128) assert_type(f + f4, np.float64 | np.float32) -assert_type(AR_f + f4, npt.NDArray[np.floating[Any]]) +assert_type(AR_f + f4, npt.NDArray[np.float64]) # Int @@ -504,7 +506,7 @@ assert_type(i8 + b_, np.int64) assert_type(i8 + b, np.int64) assert_type(i8 + c, np.complex128) assert_type(i8 + f, np.float64) -assert_type(i8 + AR_f, npt.NDArray[np.floating[Any]]) +assert_type(i8 + AR_f, npt.NDArray[np.float64]) assert_type(u8 + u8, np.uint64) assert_type(u8 + i4, Any) @@ -513,7 +515,7 @@ assert_type(u8 + b_, np.uint64) assert_type(u8 + b, np.uint64) assert_type(u8 + c, np.complex128) assert_type(u8 + f, np.float64) -assert_type(u8 + AR_f, npt.NDArray[np.floating[Any]]) +assert_type(u8 + AR_f, npt.NDArray[np.float64]) assert_type(i8 + i8, np.int64) assert_type(u8 + i8, Any) @@ -523,7 +525,7 @@ assert_type(b_ + i8, np.int64) assert_type(b + i8, np.int64) assert_type(c + i8, np.complex128) assert_type(f + i8, np.float64) -assert_type(AR_f + i8, npt.NDArray[np.floating[Any]]) +assert_type(AR_f + i8, npt.NDArray[np.float64]) assert_type(u8 + u8, np.uint64) assert_type(i4 + u8, Any) @@ -532,13 +534,13 @@ assert_type(b_ + u8, np.uint64) assert_type(b + u8, np.uint64) assert_type(c + u8, np.complex128) assert_type(f + u8, np.float64) 
-assert_type(AR_f + u8, npt.NDArray[np.floating[Any]]) +assert_type(AR_f + u8, npt.NDArray[np.float64]) assert_type(i4 + i8, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) assert_type(i4 + i4, np.int32) assert_type(i4 + b_, np.int32) assert_type(i4 + b, np.int32) -assert_type(i4 + AR_f, npt.NDArray[np.floating[Any]]) +assert_type(i4 + AR_f, npt.NDArray[np.float64]) assert_type(u4 + i8, Any) assert_type(u4 + i4, Any) @@ -546,13 +548,13 @@ assert_type(u4 + u8, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) assert_type(u4 + u4, np.uint32) assert_type(u4 + b_, np.uint32) assert_type(u4 + b, np.uint32) -assert_type(u4 + AR_f, npt.NDArray[np.floating[Any]]) +assert_type(u4 + AR_f, npt.NDArray[np.float64]) assert_type(i8 + i4, np.signedinteger[_32Bit] | np.signedinteger[_64Bit]) assert_type(i4 + i4, np.int32) assert_type(b_ + i4, np.int32) assert_type(b + i4, np.int32) -assert_type(AR_f + i4, npt.NDArray[np.floating[Any]]) +assert_type(AR_f + i4, npt.NDArray[np.float64]) assert_type(i8 + u4, Any) assert_type(i4 + u4, Any) @@ -560,4 +562,8 @@ assert_type(u8 + u4, np.unsignedinteger[_32Bit] | np.unsignedinteger[_64Bit]) assert_type(u4 + u4, np.uint32) assert_type(b_ + u4, np.uint32) assert_type(b + u4, np.uint32) -assert_type(AR_f + u4, npt.NDArray[np.floating[Any]]) +assert_type(AR_f + u4, npt.NDArray[np.float64]) + +# Any + +assert_type(AR_Any + 2, npt.NDArray[Any]) diff --git a/numpy/typing/tests/data/reveal/false_positives.pyi b/numpy/typing/tests/data/reveal/false_positives.pyi deleted file mode 100644 index 7ae95e16a720..000000000000 --- a/numpy/typing/tests/data/reveal/false_positives.pyi +++ /dev/null @@ -1,14 +0,0 @@ -from typing import Any - -import numpy as np -import numpy.typing as npt - -from typing_extensions import assert_type - -AR_Any: npt.NDArray[Any] - -# Mypy bug where overload ambiguity is ignored for `Any`-parametrized types; -# xref numpy/numpy#20099 and python/mypy#11347 -# -# The expected output would be something akin to 
`npt.NDArray[Any]` -assert_type(AR_Any + 2, npt.NDArray[np.signedinteger[Any]]) diff --git a/numpy/typing/tests/data/reveal/mod.pyi b/numpy/typing/tests/data/reveal/mod.pyi index e7e6082753be..bd7a632b0a24 100644 --- a/numpy/typing/tests/data/reveal/mod.pyi +++ b/numpy/typing/tests/data/reveal/mod.pyi @@ -83,7 +83,7 @@ assert_type(i4 % i8, np.int64 | np.int32) assert_type(i4 % f8, np.float64 | np.float32) assert_type(i4 % i4, np.int32) assert_type(i4 % f4, np.float32) -assert_type(i8 % AR_b, npt.NDArray[np.signedinteger[Any]]) +assert_type(i8 % AR_b, npt.NDArray[np.int64]) assert_type(divmod(i8, b), tuple[np.signedinteger[_64Bit], np.signedinteger[_64Bit]]) assert_type(divmod(i8, f), tuple[np.floating[_64Bit], np.floating[_64Bit]]) @@ -93,7 +93,7 @@ assert_type(divmod(i8, i4), tuple[np.signedinteger[_64Bit], np.signedinteger[_64 assert_type(divmod(i8, f4), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.floating[_32Bit], np.floating[_32Bit]]) assert_type(divmod(i4, i4), tuple[np.signedinteger[_32Bit], np.signedinteger[_32Bit]]) assert_type(divmod(i4, f4), tuple[np.floating[_32Bit], np.floating[_32Bit]]) -assert_type(divmod(i8, AR_b), tuple[npt.NDArray[np.signedinteger[Any]], npt.NDArray[np.signedinteger[Any]]]) +assert_type(divmod(i8, AR_b), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) assert_type(b % i8, np.signedinteger[_64Bit]) assert_type(f % i8, np.floating[_64Bit]) @@ -103,7 +103,7 @@ assert_type(i8 % i4, np.int64 | np.int32) assert_type(f8 % i4, np.float64) assert_type(i4 % i4, np.int32) assert_type(f4 % i4, np.float32) -assert_type(AR_b % i8, npt.NDArray[np.signedinteger[Any]]) +assert_type(AR_b % i8, npt.NDArray[np.int64]) assert_type(divmod(b, i8), tuple[np.signedinteger[_64Bit], np.signedinteger[_64Bit]]) assert_type(divmod(f, i8), tuple[np.floating[_64Bit], np.floating[_64Bit]]) @@ -113,7 +113,7 @@ assert_type(divmod(i4, i8), tuple[np.signedinteger[_64Bit], np.signedinteger[_64 assert_type(divmod(f4, i8), tuple[np.floating[_64Bit], 
np.floating[_64Bit]] | tuple[np.floating[_32Bit], np.floating[_32Bit]]) assert_type(divmod(i4, i4), tuple[np.signedinteger[_32Bit], np.signedinteger[_32Bit]]) assert_type(divmod(f4, i4), tuple[np.floating[_32Bit], np.floating[_32Bit]]) -assert_type(divmod(AR_b, i8), tuple[npt.NDArray[np.signedinteger[Any]], npt.NDArray[np.signedinteger[Any]]]) +assert_type(divmod(AR_b, i8), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) # float @@ -121,25 +121,25 @@ assert_type(f8 % b, np.float64) assert_type(f8 % f, np.float64) assert_type(i8 % f4, np.floating[_64Bit] | np.floating[_32Bit]) assert_type(f4 % f4, np.float32) -assert_type(f8 % AR_b, npt.NDArray[np.floating[Any]]) +assert_type(f8 % AR_b, npt.NDArray[np.float64]) assert_type(divmod(f8, b), tuple[np.float64, np.float64]) assert_type(divmod(f8, f), tuple[np.float64, np.float64]) assert_type(divmod(f8, f8), tuple[np.float64, np.float64]) assert_type(divmod(f8, f4), tuple[np.float64, np.float64]) assert_type(divmod(f4, f4), tuple[np.float32, np.float32]) -assert_type(divmod(f8, AR_b), tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]) +assert_type(divmod(f8, AR_b), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) assert_type(b % f8, np.float64) assert_type(f % f8, np.float64) assert_type(f8 % f8, np.float64) assert_type(f8 % f8, np.float64) assert_type(f4 % f4, np.float32) -assert_type(AR_b % f8, npt.NDArray[np.floating[Any]]) +assert_type(AR_b % f8, npt.NDArray[np.float64]) assert_type(divmod(b, f8), tuple[np.float64, np.float64]) assert_type(divmod(f, f8), tuple[np.float64, np.float64]) assert_type(divmod(f8, f8), tuple[np.float64, np.float64]) assert_type(divmod(f4, f8), tuple[np.float64, np.float64] | tuple[np.float32, np.float32]) assert_type(divmod(f4, f4), tuple[np.float32, np.float32]) -assert_type(divmod(AR_b, f8), tuple[npt.NDArray[np.floating[Any]], npt.NDArray[np.floating[Any]]]) +assert_type(divmod(AR_b, f8), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) From 
1fc37e65bd0b3d035ef30aed37a4f5a6d6244214 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 6 Jan 2025 19:41:24 +0100 Subject: [PATCH 043/187] TYP: Return the correct ``bool`` from ``issubdtype`` --- numpy/_core/numerictypes.pyi | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/numpy/_core/numerictypes.pyi b/numpy/_core/numerictypes.pyi index c2a7cb6261d4..ace5913f0f84 100644 --- a/numpy/_core/numerictypes.pyi +++ b/numpy/_core/numerictypes.pyi @@ -177,12 +177,9 @@ class _TypeCodes(TypedDict): Datetime: L['Mm'] All: L['?bhilqnpBHILQNPefdgFDGSUVOMm'] -def isdtype( - dtype: dtype[Any] | type[Any], - kind: DTypeLike | tuple[DTypeLike, ...], -) -> builtins.bool: ... +def isdtype(dtype: dtype[Any] | type[Any], kind: DTypeLike | tuple[DTypeLike, ...]) -> builtins.bool: ... -def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> bool: ... +def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> builtins.bool: ... typecodes: _TypeCodes ScalarType: tuple[ From 69c30fbd4082c4141cbdfdc9622ed83b40dc0014 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 6 Jan 2025 20:19:51 +0100 Subject: [PATCH 044/187] TYP: Always accept ``date[time]`` in the ``datetime64`` constructor --- numpy/__init__.pyi | 8 +++++--- numpy/typing/tests/data/pass/scalars.py | 9 +++++++++ 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index a14234db2735..63c38b744acb 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4311,11 +4311,13 @@ class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]): @overload def __init__(self: datetime64[int], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_IntTimeUnit], /) -> None: ... @overload - def __init__(self: datetime64[dt.datetime], value: int | bytes | str, format: _TimeUnitSpec[_NativeTimeUnit], /) -> None: ... + def __init__( + self: datetime64[dt.datetime], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_NativeTimeUnit], / + ) -> None: ... 
@overload - def __init__(self: datetime64[dt.date], value: int | bytes | str, format: _TimeUnitSpec[_DateUnit], /) -> None: ... + def __init__(self: datetime64[dt.date], value: int | bytes | str | dt.date, format: _TimeUnitSpec[_DateUnit], /) -> None: ... @overload - def __init__(self, value: bytes | str | None, format: _TimeUnitSpec = ..., /) -> None: ... + def __init__(self, value: bytes | str | dt.date | None, format: _TimeUnitSpec = ..., /) -> None: ... @overload def __add__(self: datetime64[_AnyDT64Item], x: int | integer[Any] | np.bool, /) -> datetime64[_AnyDT64Item]: ... diff --git a/numpy/typing/tests/data/pass/scalars.py b/numpy/typing/tests/data/pass/scalars.py index 01beb0b29f52..89f24cb92991 100644 --- a/numpy/typing/tests/data/pass/scalars.py +++ b/numpy/typing/tests/data/pass/scalars.py @@ -89,9 +89,18 @@ def __float__(self) -> float: np.datetime64("2019") np.datetime64(b"2019") np.datetime64("2019", "D") +np.datetime64("2019", "us") +np.datetime64("2019", "as") +np.datetime64(np.datetime64()) np.datetime64(np.datetime64()) np.datetime64(dt.datetime(2000, 5, 3)) +np.datetime64(dt.datetime(2000, 5, 3), "D") +np.datetime64(dt.datetime(2000, 5, 3), "us") +np.datetime64(dt.datetime(2000, 5, 3), "as") np.datetime64(dt.date(2000, 5, 3)) +np.datetime64(dt.date(2000, 5, 3), "D") +np.datetime64(dt.date(2000, 5, 3), "us") +np.datetime64(dt.date(2000, 5, 3), "as") np.datetime64(None) np.datetime64(None, "D") From 0a62d0f649d6733c0fbad0acdda5d245639bb0b5 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Thu, 2 Jan 2025 23:42:38 +0100 Subject: [PATCH 045/187] MAINT: bump ``mypy`` to ``1.14.1`` (#28089) * MAINT: bump `mypy` to `1.14.1` * TYP: fix new `mypy==1.14.1` type-test errors * TYP: backport `collections.abc.Buffer` for `npt.ArrayLike` on `python<3.11` --- environment.yml | 2 +- numpy/_typing/_array_like.py | 19 +++++++++---------- .../typing/tests/data/reveal/index_tricks.pyi | 12 ++++++------ requirements/test_requirements.txt | 2 +- 4 files changed, 17 
insertions(+), 18 deletions(-) diff --git a/environment.yml b/environment.yml index ff9fd9e84c20..46655d750d0d 100644 --- a/environment.yml +++ b/environment.yml @@ -25,7 +25,7 @@ dependencies: - hypothesis # For type annotations - typing_extensions>=4.2.0 # needed for python < 3.10 - - mypy=1.13.0 + - mypy=1.14.1 - orjson # makes mypy faster # For building docs - sphinx>=4.5.0 diff --git a/numpy/_typing/_array_like.py b/numpy/_typing/_array_like.py index 292b99d918e2..7798e5d5d751 100644 --- a/numpy/_typing/_array_like.py +++ b/numpy/_typing/_array_like.py @@ -88,17 +88,16 @@ def __array_function__( ) if sys.version_info >= (3, 12): - from collections.abc import Buffer - - ArrayLike: TypeAlias = Buffer | _DualArrayLike[ - dtype[Any], - bool | int | float | complex | str | bytes, - ] + from collections.abc import Buffer as _Buffer else: - ArrayLike: TypeAlias = _DualArrayLike[ - dtype[Any], - bool | int | float | complex | str | bytes, - ] + @runtime_checkable + class _Buffer(Protocol): + def __buffer__(self, flags: int, /) -> memoryview: ... 
+ +ArrayLike: TypeAlias = _Buffer | _DualArrayLike[ + dtype[Any], + bool | int | float | complex | str | bytes, +] # `ArrayLike_co`: array-like objects that can be coerced into `X` # given the casting rules `same_kind` diff --git a/numpy/typing/tests/data/reveal/index_tricks.pyi b/numpy/typing/tests/data/reveal/index_tricks.pyi index 7f5dcf8ccc3e..1db10928d2f5 100644 --- a/numpy/typing/tests/data/reveal/index_tricks.pyi +++ b/numpy/typing/tests/data/reveal/index_tricks.pyi @@ -58,13 +58,13 @@ assert_type(np.mgrid[1:1:2, None:10], npt.NDArray[Any]) assert_type(np.ogrid[1:1:2], tuple[npt.NDArray[Any], ...]) assert_type(np.ogrid[1:1:2, None:10], tuple[npt.NDArray[Any], ...]) -assert_type(np.index_exp[0:1], tuple[slice]) -assert_type(np.index_exp[0:1, None:3], tuple[slice, slice]) -assert_type(np.index_exp[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice, EllipsisType, list[int]]) +assert_type(np.index_exp[0:1], tuple[slice[int, int, None]]) +assert_type(np.index_exp[0:1, None:3], tuple[slice[int, int, None], slice[None, int, None]]) +assert_type(np.index_exp[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice[int, int, None], EllipsisType, list[int]]) -assert_type(np.s_[0:1], slice) -assert_type(np.s_[0:1, None:3], tuple[slice, slice]) -assert_type(np.s_[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice, EllipsisType, list[int]]) +assert_type(np.s_[0:1], slice[int, int, None]) +assert_type(np.s_[0:1, None:3], tuple[slice[int, int, None], slice[None, int, None]]) +assert_type(np.s_[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice[int, int, None], EllipsisType, list[int]]) assert_type(np.ix_(AR_LIKE_b), tuple[npt.NDArray[np.bool], ...]) assert_type(np.ix_(AR_LIKE_i, AR_LIKE_f), tuple[npt.NDArray[np.float64], ...]) diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index dc28402d2cb5..7ea464dadc40 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -14,7 +14,7 @@ cffi; python_version < '3.10' # For 
testing types. Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml -mypy==1.13.0; platform_python_implementation != "PyPy" +mypy==1.14.1; platform_python_implementation != "PyPy" typing_extensions>=4.2.0 # for optional f2py encoding detection charset-normalizer From 57afaf591f9cc631b5c2f073236cdc9e868d66b4 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Tue, 7 Jan 2025 11:52:51 +0100 Subject: [PATCH 046/187] BUG: Fix auxdata initialization in ufunc slow path The reason this was not found earlier is that auxdata is currently set by most function and the fast path seems to be taken a shocking amount of times. There are no further similar missing inits. Closes gh-28117 --- numpy/_core/src/umath/ufunc_object.c | 2 +- numpy/_core/tests/test_nep50_promotions.py | 14 ++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 8748ad5e4974..657330b6f4be 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -1108,7 +1108,7 @@ execute_ufunc_loop(PyArrayMethod_Context *context, int masked, * based on the fixed strides. 
*/ PyArrayMethod_StridedLoop *strided_loop; - NpyAuxData *auxdata; + NpyAuxData *auxdata = NULL; npy_intp fixed_strides[NPY_MAXARGS]; NpyIter_GetInnerFixedStrideArray(iter, fixed_strides); diff --git a/numpy/_core/tests/test_nep50_promotions.py b/numpy/_core/tests/test_nep50_promotions.py index 688be5338437..9eec02239e34 100644 --- a/numpy/_core/tests/test_nep50_promotions.py +++ b/numpy/_core/tests/test_nep50_promotions.py @@ -237,6 +237,20 @@ def test_integer_comparison(sctype, other_val, comp): assert_array_equal(comp(other_val, val_obj), comp(other_val, val)) +@pytest.mark.parametrize("arr", [ + np.ones((100, 100), dtype=np.uint8)[::2], # not trivially iterable + np.ones(20000, dtype=">u4"), # cast and >buffersize + np.ones(100, dtype=">u4"), # fast path compatible with cast +]) +def test_integer_comparison_with_cast(arr): + # Similar to above, but mainly test a few cases that cover the slow path + # the test is limited to unsigned ints and -1 for simplicity. + res = arr >= -1 + assert_array_equal(res, np.ones_like(arr, dtype=bool)) + res = arr < -1 + assert_array_equal(res, np.zeros_like(arr, dtype=bool)) + + @pytest.mark.parametrize("comp", [np.equal, np.not_equal, np.less_equal, np.less, np.greater_equal, np.greater]) From 383d1e1c59401558ac1deadc77ff76cffa77ac54 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 9 Jan 2025 10:34:48 -0700 Subject: [PATCH 047/187] BUG: move reduction initialization to ufunc initialization (#28123) * BUG: move reduction initialization to ufunc initialization * MAINT: refactor to call get_initial_from_ufunc during init * TST: add test for multithreaded reductions * MAINT: fix linter * Apply suggestions from code review Co-authored-by: Sebastian Berg * MAINT: simplify further --------- Co-authored-by: Sebastian Berg --- numpy/_core/src/multiarray/multiarraymodule.c | 37 +++++++-------- numpy/_core/src/umath/legacy_array_method.c | 45 +++++++++++++++---- numpy/_core/tests/test_multithreading.py | 14 ++++++ 
numpy/testing/_private/utils.py | 4 +- 4 files changed, 72 insertions(+), 28 deletions(-) diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index c9d46d859f60..d337a84e9baf 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -5033,6 +5033,24 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { goto err; } + /* + * Initialize the default PyDataMem_Handler capsule singleton. + */ + PyDataMem_DefaultHandler = PyCapsule_New( + &default_handler, MEM_HANDLER_CAPSULE_NAME, NULL); + if (PyDataMem_DefaultHandler == NULL) { + goto err; + } + + /* + * Initialize the context-local current handler + * with the default PyDataMem_Handler capsule. + */ + current_handler = PyContextVar_New("current_allocator", PyDataMem_DefaultHandler); + if (current_handler == NULL) { + goto err; + } + if (initumath(m) != 0) { goto err; } @@ -5067,7 +5085,7 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { * init_string_dtype() but that needs to happen after * the legacy dtypemeta classes are available. */ - + if (npy_cache_import_runtime( "numpy.dtypes", "_add_dtype_helper", &npy_runtime_imports._add_dtype_helper) == -1) { @@ -5081,23 +5099,6 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { } PyDict_SetItemString(d, "StringDType", (PyObject *)&PyArray_StringDType); - /* - * Initialize the default PyDataMem_Handler capsule singleton. - */ - PyDataMem_DefaultHandler = PyCapsule_New( - &default_handler, MEM_HANDLER_CAPSULE_NAME, NULL); - if (PyDataMem_DefaultHandler == NULL) { - goto err; - } - /* - * Initialize the context-local current handler - * with the default PyDataMem_Handler capsule. 
- */ - current_handler = PyContextVar_New("current_allocator", PyDataMem_DefaultHandler); - if (current_handler == NULL) { - goto err; - } - // initialize static reference to a zero-like array npy_static_pydata.zero_pyint_like_arr = PyArray_ZEROS( 0, NULL, NPY_DEFAULT_INT, NPY_FALSE); diff --git a/numpy/_core/src/umath/legacy_array_method.c b/numpy/_core/src/umath/legacy_array_method.c index 9592df0e1366..705262fedd38 100644 --- a/numpy/_core/src/umath/legacy_array_method.c +++ b/numpy/_core/src/umath/legacy_array_method.c @@ -311,7 +311,7 @@ get_initial_from_ufunc( } } else if (context->descriptors[0]->type_num == NPY_OBJECT - && !reduction_is_empty) { + && !reduction_is_empty) { /* Allows `sum([object()])` to work, but use 0 when empty. */ Py_DECREF(identity_obj); return 0; @@ -323,13 +323,6 @@ get_initial_from_ufunc( return -1; } - if (PyTypeNum_ISNUMBER(context->descriptors[0]->type_num)) { - /* For numbers we can cache to avoid going via Python ints */ - memcpy(context->method->legacy_initial, initial, - context->descriptors[0]->elsize); - context->method->get_reduction_initial = ©_cached_initial; - } - /* Reduction can use the initial value */ return 1; } @@ -427,11 +420,47 @@ PyArray_NewLegacyWrappingArrayMethod(PyUFuncObject *ufunc, }; PyBoundArrayMethodObject *bound_res = PyArrayMethod_FromSpec_int(&spec, 1); + if (bound_res == NULL) { return NULL; } PyArrayMethodObject *res = bound_res->method; + + // set cached initial value for numeric reductions to avoid creating + // a python int in every reduction + if (PyTypeNum_ISNUMBER(bound_res->dtypes[0]->type_num) && + ufunc->nin == 2 && ufunc->nout == 1) { + + PyArray_Descr *descrs[3]; + + for (int i = 0; i < 3; i++) { + // only dealing with numeric legacy dtypes so this should always be + // valid + descrs[i] = bound_res->dtypes[i]->singleton; + } + + PyArrayMethod_Context context = { + (PyObject *)ufunc, + bound_res->method, + descrs, + }; + + int ret = get_initial_from_ufunc(&context, 0, 
context.method->legacy_initial); + + if (ret < 0) { + Py_DECREF(bound_res); + return NULL; + } + + // only use the cached initial value if it's valid + if (ret > 0) { + context.method->get_reduction_initial = ©_cached_initial; + } + } + + Py_INCREF(res); Py_DECREF(bound_res); + return res; } diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 754688501c2d..2512b7c199dc 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -120,3 +120,17 @@ def legacy_125(): task1.start() task2.start() + +def test_parallel_reduction(): + # gh-28041 + NUM_THREADS = 50 + + b = threading.Barrier(NUM_THREADS) + + x = np.arange(1000) + + def closure(): + b.wait() + np.sum(x) + + run_threaded(closure, NUM_THREADS, max_workers=NUM_THREADS) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 4ebfb54bd563..fafc4cd34a23 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -2684,9 +2684,9 @@ def _get_glibc_version(): _glibc_older_than = lambda x: (_glibcver != '0.0' and _glibcver < x) -def run_threaded(func, iters, pass_count=False): +def run_threaded(func, iters, pass_count=False, max_workers=8): """Runs a function many times in parallel""" - with concurrent.futures.ThreadPoolExecutor(max_workers=8) as tpe: + with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as tpe: if pass_count: futures = [tpe.submit(func, i) for i in range(iters)] else: From 4606ced811f685a4bb79320e8fc5f8ceda347b48 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 9 Jan 2025 01:09:05 +0100 Subject: [PATCH 048/187] TYP: Fix ``interp`` to accept and return scalars Co-authored-by: PTUsumit <2301109104@ptuniv.edu.in> --- numpy/lib/_function_base_impl.pyi | 97 ++++++++++++++++--- .../tests/data/reveal/lib_function_base.pyi | 9 ++ 2 files changed, 91 insertions(+), 15 deletions(-) diff --git a/numpy/lib/_function_base_impl.pyi 
b/numpy/lib/_function_base_impl.pyi index a55a4c3f6b81..214ad1f04f4b 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -1,4 +1,4 @@ -from collections.abc import Sequence, Iterator, Callable, Iterable +from collections.abc import Sequence, Callable, Iterable from typing import ( Concatenate, Literal as L, @@ -15,8 +15,9 @@ from typing import ( ) from typing_extensions import deprecated +import numpy as np from numpy import ( - vectorize as vectorize, + vectorize, generic, integer, floating, @@ -35,19 +36,22 @@ from numpy._typing import ( NDArray, ArrayLike, DTypeLike, - _ShapeLike, - _ScalarLike_co, - _DTypeLike, _ArrayLike, + _DTypeLike, + _ShapeLike, _ArrayLikeBool_co, _ArrayLikeInt_co, _ArrayLikeFloat_co, _ArrayLikeComplex_co, + _ArrayLikeNumber_co, _ArrayLikeTD64_co, _ArrayLikeDT64_co, _ArrayLikeObject_co, _FloatLike_co, _ComplexLike_co, + _NumberLike_co, + _ScalarLike_co, + _NestedSequence ) __all__ = [ @@ -303,24 +307,87 @@ def diff( append: ArrayLike = ..., ) -> NDArray[Any]: ... -@overload +@overload # float scalar def interp( - x: _ArrayLikeFloat_co, + x: _FloatLike_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> float64: ... +@overload # float array +def interp( + x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co], xp: _ArrayLikeFloat_co, fp: _ArrayLikeFloat_co, - left: None | _FloatLike_co = ..., - right: None | _FloatLike_co = ..., - period: None | _FloatLike_co = ..., + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, ) -> NDArray[float64]: ... 
-@overload +@overload # float scalar or array def interp( x: _ArrayLikeFloat_co, xp: _ArrayLikeFloat_co, - fp: _ArrayLikeComplex_co, - left: None | _ComplexLike_co = ..., - right: None | _ComplexLike_co = ..., - period: None | _FloatLike_co = ..., + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[float64] | float64: ... +@overload # complex scalar +def interp( + x: _FloatLike_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLike[complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> complex128: ... +@overload # complex or float scalar +def interp( + x: _FloatLike_co, + xp: _ArrayLikeFloat_co, + fp: Sequence[complex | complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> complex128 | float64: ... +@overload # complex array +def interp( + x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co], + xp: _ArrayLikeFloat_co, + fp: _ArrayLike[complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, ) -> NDArray[complex128]: ... +@overload # complex or float array +def interp( + x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co], + xp: _ArrayLikeFloat_co, + fp: Sequence[complex | complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[complex128 | float64]: ... +@overload # complex scalar or array +def interp( + x: _ArrayLikeFloat_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLike[complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[complex128] | complex128: ... 
+@overload # complex or float scalar or array +def interp( + x: _ArrayLikeFloat_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeNumber_co, + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[complex128 | float64] | complex128 | float64: ... @overload def angle(z: _ComplexLike_co, deg: bool = ...) -> floating[Any]: ... diff --git a/numpy/typing/tests/data/reveal/lib_function_base.pyi b/numpy/typing/tests/data/reveal/lib_function_base.pyi index 6267163e4280..9cd06a36f3e0 100644 --- a/numpy/typing/tests/data/reveal/lib_function_base.pyi +++ b/numpy/typing/tests/data/reveal/lib_function_base.pyi @@ -94,6 +94,15 @@ assert_type(np.diff("bob", n=0), str) assert_type(np.diff(AR_f8, axis=0), npt.NDArray[Any]) assert_type(np.diff(AR_LIKE_f8, prepend=1.5), npt.NDArray[Any]) +assert_type(np.interp(1, [1], AR_f8), np.float64) +assert_type(np.interp(1, [1], [1]), np.float64) +assert_type(np.interp(1, [1], AR_c16), np.complex128) +assert_type(np.interp(1, [1], [1j]), np.complex128) # pyright correctly infers `complex128 | float64` +assert_type(np.interp([1], [1], AR_f8), npt.NDArray[np.float64]) +assert_type(np.interp([1], [1], [1]), npt.NDArray[np.float64]) +assert_type(np.interp([1], [1], AR_c16), npt.NDArray[np.complex128]) +assert_type(np.interp([1], [1], [1j]), npt.NDArray[np.complex128]) # pyright correctly infers `NDArray[complex128 | float64]` + assert_type(np.angle(f8), np.floating[Any]) assert_type(np.angle(AR_f8), npt.NDArray[np.floating[Any]]) assert_type(np.angle(AR_c16, deg=True), npt.NDArray[np.floating[Any]]) From c8e22169af53fd5e8ec28a1531f670585b1569b9 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 9 Jan 2025 14:12:30 -0700 Subject: [PATCH 049/187] BUG: call PyType_Ready in f2py to avoid data races --- numpy/f2py/rules.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index bf7b46c89f08..84137811a446 100644 --- 
a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -245,6 +245,11 @@ if (! PyErr_Occurred()) on_exit(f2py_report_on_exit,(void*)\"#modulename#\"); #endif + + if (PyType_Ready(&PyFortran_Type) < 0) { + return NULL; + } + return m; } #ifdef __cplusplus From 8dc4ff81a641c234bedd360a497fd99aec6ab18f Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 10 Jan 2025 13:12:28 -0700 Subject: [PATCH 050/187] BUG: remove unnecessary call to PyArray_UpdateFlags --- numpy/_core/src/multiarray/iterators.c | 1 - 1 file changed, 1 deletion(-) diff --git a/numpy/_core/src/multiarray/iterators.c b/numpy/_core/src/multiarray/iterators.c index 2806670d3e07..c3b6500f69d0 100644 --- a/numpy/_core/src/multiarray/iterators.c +++ b/numpy/_core/src/multiarray/iterators.c @@ -136,7 +136,6 @@ PyArray_RawIterBaseInit(PyArrayIterObject *it, PyArrayObject *ao) nd = PyArray_NDIM(ao); /* The legacy iterator only supports 32 dimensions */ assert(nd <= NPY_MAXDIMS_LEGACY_ITERS); - PyArray_UpdateFlags(ao, NPY_ARRAY_C_CONTIGUOUS); if (PyArray_ISCONTIGUOUS(ao)) { it->contiguous = 1; } From ef83ffadf14b0e326c125e14bbab832d328c4521 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 10 Jan 2025 13:47:31 -0700 Subject: [PATCH 051/187] TST: add some features to the run_threaded helper --- numpy/_core/tests/test_multithreading.py | 17 ++++++-------- numpy/testing/_private/utils.py | 28 +++++++++++++++++------- 2 files changed, 27 insertions(+), 18 deletions(-) diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 2512b7c199dc..f4fe387a44e5 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -18,6 +18,7 @@ def func(seed): run_threaded(func, 500, pass_count=True) + def test_parallel_ufunc_execution(): # if the loop data cache or dispatch cache are not thread-safe # computing ufuncs simultaneously in multiple threads leads @@ -31,18 +32,14 @@ def func(): # see gh-26690 NUM_THREADS = 50 - b = 
threading.Barrier(NUM_THREADS) - a = np.ones(1000) - def f(): + def f(b): b.wait() return a.sum() - threads = [threading.Thread(target=f) for _ in range(NUM_THREADS)] + run_threaded(f, NUM_THREADS, max_workers=NUM_THREADS, pass_barrier=True) - [t.start() for t in threads] - [t.join() for t in threads] def test_temp_elision_thread_safety(): amid = np.ones(50000) @@ -121,16 +118,16 @@ def legacy_125(): task1.start() task2.start() + def test_parallel_reduction(): # gh-28041 NUM_THREADS = 50 - b = threading.Barrier(NUM_THREADS) - x = np.arange(1000) - def closure(): + def closure(b): b.wait() np.sum(x) - run_threaded(closure, NUM_THREADS, max_workers=NUM_THREADS) + run_threaded(closure, NUM_THREADS, max_workers=NUM_THREADS, + pass_barrier=True) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index fafc4cd34a23..1e84101d2e49 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -18,6 +18,7 @@ import pprint import sysconfig import concurrent.futures +import threading import numpy as np from numpy._core import ( @@ -2684,12 +2685,23 @@ def _get_glibc_version(): _glibc_older_than = lambda x: (_glibcver != '0.0' and _glibcver < x) -def run_threaded(func, iters, pass_count=False, max_workers=8): +def run_threaded(func, iters=8, pass_count=False, max_workers=8, + pass_barrier=False, outer_iterations=1): """Runs a function many times in parallel""" - with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as tpe: - if pass_count: - futures = [tpe.submit(func, i) for i in range(iters)] - else: - futures = [tpe.submit(func) for _ in range(iters)] - for f in futures: - f.result() + for _ in range(outer_iterations): + with (concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) + as tpe): + args = [] + if pass_barrier: + if max_workers != iters: + raise RuntimeError( + "Must set max_workers equal to the number of " + "iterations to avoid deadlocks.") + barrier = threading.Barrier(max_workers) + 
args.append(barrier) + if pass_count: + futures = [tpe.submit(func, i, *args) for i in range(iters)] + else: + futures = [tpe.submit(func, *args) for _ in range(iters)] + for f in futures: + f.result() From 422214363991fdea8c9553539244e3d082021b99 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 10 Jan 2025 13:47:55 -0700 Subject: [PATCH 052/187] TST: add test from gh-28042 --- numpy/_core/tests/test_multithreading.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index f4fe387a44e5..a7f3c8269a5b 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -131,3 +131,14 @@ def closure(b): run_threaded(closure, NUM_THREADS, max_workers=NUM_THREADS, pass_barrier=True) + + +def test_parallel_flat_iterator(): + x = np.arange(20).reshape(5, 4).T + + def closure(b): + b.wait() + for _ in range(100): + list(x.flat) + + run_threaded(closure, outer_iterations=100, pass_barrier=True) From 94ac8eecd6e4f8524bd85746e9739d424254a637 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 15 Jan 2025 16:53:55 -0700 Subject: [PATCH 053/187] BUG: Avoid data race in PyArray_CheckFromAny_int (#28154) * BUG: Avoid data race in PyArray_CheckFromAny_int * TST: add test * MAINT: simplify byteswapping code in PyArray_CheckFromAny_int * MAINT: drop ISBYTESWAPPED check --- numpy/_core/src/multiarray/ctors.c | 16 +++++----------- numpy/_core/src/multiarray/dtypemeta.h | 5 +++++ numpy/_core/tests/test_multithreading.py | 13 +++++++++++++ numpy/testing/_private/utils.py | 8 ++++++-- 4 files changed, 29 insertions(+), 13 deletions(-) diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index c9f9ac3941a9..cfa8b9231818 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -1829,18 +1829,12 @@ PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr, { PyObject *obj; if 
(requires & NPY_ARRAY_NOTSWAPPED) { - if (!in_descr && PyArray_Check(op) && - PyArray_ISBYTESWAPPED((PyArrayObject* )op)) { - in_descr = PyArray_DescrNew(PyArray_DESCR((PyArrayObject *)op)); - if (in_descr == NULL) { - return NULL; - } - } - else if (in_descr && !PyArray_ISNBO(in_descr->byteorder)) { - PyArray_DESCR_REPLACE(in_descr); + if (!in_descr && PyArray_Check(op)) { + in_descr = PyArray_DESCR((PyArrayObject *)op); + Py_INCREF(in_descr); } - if (in_descr && in_descr->byteorder != NPY_IGNORE) { - in_descr->byteorder = NPY_NATIVE; + if (in_descr) { + PyArray_DESCR_REPLACE_CANONICAL(in_descr); } } diff --git a/numpy/_core/src/multiarray/dtypemeta.h b/numpy/_core/src/multiarray/dtypemeta.h index d1b0b13b4bca..8b3abbeb1883 100644 --- a/numpy/_core/src/multiarray/dtypemeta.h +++ b/numpy/_core/src/multiarray/dtypemeta.h @@ -285,6 +285,11 @@ PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) v, itemptr, arr); } +// Like PyArray_DESCR_REPLACE, but calls ensure_canonical instead of DescrNew +#define PyArray_DESCR_REPLACE_CANONICAL(descr) do { \ + PyArray_Descr *_new_ = NPY_DT_CALL_ensure_canonical(descr); \ + Py_XSETREF(descr, _new_); \ + } while(0) #endif /* NUMPY_CORE_SRC_MULTIARRAY_DTYPEMETA_H_ */ diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index a7f3c8269a5b..b614f2c76385 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -134,6 +134,7 @@ def closure(b): def test_parallel_flat_iterator(): + # gh-28042 x = np.arange(20).reshape(5, 4).T def closure(b): @@ -142,3 +143,15 @@ def closure(b): list(x.flat) run_threaded(closure, outer_iterations=100, pass_barrier=True) + + # gh-28143 + def prepare_args(): + return [np.arange(10)] + + def closure(x, b): + b.wait() + for _ in range(100): + y = np.arange(10) + y.flat[x] = x + + run_threaded(closure, pass_barrier=True, prepare_args=prepare_args) diff --git a/numpy/testing/_private/utils.py 
b/numpy/testing/_private/utils.py index 1e84101d2e49..3c2d398e8a29 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -2686,12 +2686,16 @@ def _get_glibc_version(): def run_threaded(func, iters=8, pass_count=False, max_workers=8, - pass_barrier=False, outer_iterations=1): + pass_barrier=False, outer_iterations=1, + prepare_args=None): """Runs a function many times in parallel""" for _ in range(outer_iterations): with (concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as tpe): - args = [] + if prepare_args is None: + args = [] + else: + args = prepare_args() if pass_barrier: if max_workers != iters: raise RuntimeError( From feae11d3964d37bebc58c0239c50efcc8a4b6f89 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sun, 22 Dec 2024 21:57:43 +0000 Subject: [PATCH 054/187] TST: Add f2py case regression Co-authored-by: germasch --- numpy/f2py/tests/src/regression/lower_f2py_fortran.f90 | 5 +++++ numpy/f2py/tests/test_regression.py | 9 +++++++++ 2 files changed, 14 insertions(+) create mode 100644 numpy/f2py/tests/src/regression/lower_f2py_fortran.f90 diff --git a/numpy/f2py/tests/src/regression/lower_f2py_fortran.f90 b/numpy/f2py/tests/src/regression/lower_f2py_fortran.f90 new file mode 100644 index 000000000000..1c4b8c192b1b --- /dev/null +++ b/numpy/f2py/tests/src/regression/lower_f2py_fortran.f90 @@ -0,0 +1,5 @@ +subroutine inquire_next(IU) + IMPLICIT NONE + integer :: IU + !f2py intent(in) IU +end subroutine diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index 335c8470d2af..c62f82ac3fc0 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -122,6 +122,15 @@ def test_gh26148b(self): assert(res[0] == 8) assert(res[1] == 15) +class TestLowerF2PYDirectives(util.F2PyTest): + # Check variables are cased correctly + sources = [util.getpath("tests", "src", "regression", "lower_f2py_fortran.f90")] + + @pytest.mark.slow + def test_gh28014(self): + 
self.module.inquire_next(3) + assert True + @pytest.mark.slow def test_gh26623(): # Including libraries with . should not generate an incorrect meson.build From a19acf1f66e6c8a57ecd37710b444e5bf8d10b89 Mon Sep 17 00:00:00 2001 From: Rohit Goswami Date: Sun, 22 Dec 2024 21:58:36 +0000 Subject: [PATCH 055/187] BUG: Fix casing for f2py directives --- numpy/f2py/auxfuncs.py | 7 ++++++- numpy/f2py/crackfortran.py | 8 +++----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index 095e2600f317..e926a52d1b51 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -26,7 +26,7 @@ 'hasexternals', 'hasinitvalue', 'hasnote', 'hasresultnote', 'isallocatable', 'isarray', 'isarrayofstrings', 'ischaracter', 'ischaracterarray', 'ischaracter_or_characterarray', - 'iscomplex', + 'iscomplex', 'iscstyledirective', 'iscomplexarray', 'iscomplexfunction', 'iscomplexfunction_warn', 'isdouble', 'isdummyroutine', 'isexternal', 'isfunction', 'isfunction_wrap', 'isint1', 'isint1array', 'isinteger', 'isintent_aux', @@ -423,6 +423,11 @@ def isrequired(var): return not isoptional(var) and isintent_nothide(var) +def iscstyledirective(f2py_line): + directives = {"callstatement", "callprotoargument", "pymethoddef"} + return any(directive in f2py_line.lower() for directive in directives) + + def isintent_in(var): if 'intent' not in var: return 1 diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 6eea03477808..94cb64abe035 100644 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -510,11 +510,9 @@ def readfortrancode(ffile, dowithline=show, istop=1): origfinalline = '' else: if localdolowercase: - # lines with intent() should be lowered otherwise - # TestString::test_char fails due to mixed case - # f2py directives without intent() should be left untouched - # gh-2547, gh-27697, gh-26681 - finalline = ll.lower() if "intent" in ll.lower() or not is_f2py_directive else ll + # only skip 
lowering for C style constructs + # gh-2547, gh-27697, gh-26681, gh-28014 + finalline = ll.lower() if not (is_f2py_directive and iscstyledirective(ll)) else ll else: finalline = ll origfinalline = ll From f782790c08c50120b68641b3557ab96f5c1964c7 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Jan 2025 20:55:08 +0100 Subject: [PATCH 056/187] TYP: Fix overlapping overloads issue in 2->1 ufuncs --- numpy/_typing/_ufunc.pyi | 160 +++++++++++++++++++++++++++------------ 1 file changed, 111 insertions(+), 49 deletions(-) diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index 997d297f65d9..b5ac0ff635dd 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -4,32 +4,32 @@ The signatures of the ufuncs are too varied to reasonably type with a single class. So instead, `ufunc` has been expanded into four private subclasses, one for each combination of `~ufunc.nin` and `~ufunc.nout`. - """ from typing import ( Any, Generic, + Literal, NoReturn, - TypedDict, - overload, + Protocol, + SupportsIndex, TypeAlias, + TypedDict, TypeVar, - Literal, - SupportsIndex, - Protocol, + overload, type_check_only, ) + from typing_extensions import LiteralString, Unpack import numpy as np -from numpy import ufunc, _CastingKind, _OrderKACF +from numpy import _CastingKind, _OrderKACF, ufunc from numpy.typing import NDArray -from ._shape import _ShapeLike -from ._scalars import _ScalarLike_co from ._array_like import ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co from ._dtype_like import DTypeLike +from ._scalars import _ScalarLike_co +from ._shape import _ShapeLike _T = TypeVar("_T") _2Tuple: TypeAlias = tuple[_T, _T] @@ -61,6 +61,13 @@ class _SupportsArrayUFunc(Protocol): **kwargs: Any, ) -> Any: ... 
+@type_check_only +class _UFunc3Kwargs(TypedDict, total=False): + where: _ArrayLikeBool_co | None + casting: _CastingKind + order: _OrderKACF + subok: bool + signature: _3Tuple[str | None] | str | None # NOTE: `reduce`, `accumulate`, `reduceat` and `outer` raise a ValueError for # ufuncs that don't accept two input arguments and return one output argument. @@ -72,6 +79,8 @@ class _SupportsArrayUFunc(Protocol): # NOTE: If 2 output types are returned then `out` must be a # 2-tuple of arrays. Otherwise `None` or a plain array are also acceptable +# pyright: reportIncompatibleMethodOverride=false + @type_check_only class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @property @@ -162,34 +171,61 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i @property def signature(self) -> None: ... - @overload + @overload # (scalar, scalar) -> scalar def __call__( self, - __x1: _ScalarLike_co, - __x2: _ScalarLike_co, - out: None = ..., + x1: _ScalarLike_co, + x2: _ScalarLike_co, + /, + out: None = None, *, - where: None | _ArrayLikeBool_co = ..., - casting: _CastingKind = ..., - order: _OrderKACF = ..., - dtype: DTypeLike = ..., - subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], ) -> Any: ... - @overload + @overload # (array-like, array) -> array def __call__( self, - __x1: ArrayLike, - __x2: ArrayLike, - out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., + x1: ArrayLike, + x2: NDArray[np.generic], + /, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, *, - where: None | _ArrayLikeBool_co = ..., - casting: _CastingKind = ..., - order: _OrderKACF = ..., - dtype: DTypeLike = ..., - subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... 
+ @overload # (array, array-like) -> array + def __call__( + self, + x1: NDArray[np.generic], + x2: ArrayLike, + /, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... + @overload # (array-like, array-like, out=array) -> array + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: NDArray[np.generic] | tuple[NDArray[np.generic]], + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], ) -> NDArray[Any]: ... + @overload # (array-like, array-like) -> array | scalar + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any] | Any: ... def at( self, @@ -227,35 +263,61 @@ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i out: None | NDArray[Any] = ..., ) -> NDArray[Any]: ... - # Expand `**kwargs` into explicit keyword-only arguments - @overload + @overload # (scalar, scalar) -> scalar def outer( self, A: _ScalarLike_co, B: _ScalarLike_co, - /, *, - out: None = ..., - where: None | _ArrayLikeBool_co = ..., - casting: _CastingKind = ..., - order: _OrderKACF = ..., - dtype: DTypeLike = ..., - subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., + /, + *, + out: None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], ) -> Any: ... - @overload - def outer( # type: ignore[misc] + @overload # (array-like, array) -> array + def outer( self, A: ArrayLike, + B: NDArray[np.generic], + /, + *, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... 
+ @overload # (array, array-like) -> array + def outer( + self, + A: NDArray[np.generic], B: ArrayLike, - /, *, - out: None | NDArray[Any] | tuple[NDArray[Any]] = ..., - where: None | _ArrayLikeBool_co = ..., - casting: _CastingKind = ..., - order: _OrderKACF = ..., - dtype: DTypeLike = ..., - subok: bool = ..., - signature: str | _3Tuple[None | str] = ..., + /, + *, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], ) -> NDArray[Any]: ... + @overload # (array-like, array-like, out=array) -> array + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, + *, + out: NDArray[np.generic] | tuple[NDArray[np.generic]], + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... + @overload # (array-like, array-like) -> array | scalar + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, + *, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any] | Any: ... @type_check_only class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] From 6a5f537ca978ed9d5204aa9d24845abf330b765f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alicia=20Boya=20Garc=C3=ADa?= Date: Thu, 16 Jan 2025 22:20:02 +0100 Subject: [PATCH 057/187] TYP: preserve shape-type in ndarray.astype() This patch changes the return type in astype() from NDArray to ndarray so that shape information is preserved and adds tests for it. Similar changes are added to np.astype() for consistency. 
--- numpy/__init__.pyi | 4 ++-- numpy/_core/numeric.pyi | 8 ++++---- numpy/typing/tests/data/reveal/ndarray_conversion.pyi | 8 ++++++++ 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 742bc7442082..4ab62ecc8f34 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2500,7 +2500,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): casting: _CastingKind = ..., subok: builtins.bool = ..., copy: builtins.bool | _CopyMode = ..., - ) -> NDArray[_SCT]: ... + ) -> ndarray[_ShapeT_co, dtype[_SCT]]: ... @overload def astype( self, @@ -2509,7 +2509,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): casting: _CastingKind = ..., subok: builtins.bool = ..., copy: builtins.bool | _CopyMode = ..., - ) -> NDArray[Any]: ... + ) -> ndarray[_ShapeT_co, dtype[Any]]: ... @overload def view(self) -> Self: ... diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index 41c9873877e0..d23300752cd7 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -872,15 +872,15 @@ def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ... @overload def astype( - x: NDArray[Any], + x: ndarray[_ShapeType, dtype[Any]], dtype: _DTypeLike[_SCT], copy: bool = ..., device: None | L["cpu"] = ..., -) -> NDArray[_SCT]: ... +) -> ndarray[_ShapeType, dtype[_SCT]]: ... @overload def astype( - x: NDArray[Any], + x: ndarray[_ShapeType, dtype[Any]], dtype: DTypeLike, copy: bool = ..., device: None | L["cpu"] = ..., -) -> NDArray[Any]: ... +) -> ndarray[_ShapeType, dtype[Any]]: ... 
diff --git a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi index 789585ec963b..b6909e64f780 100644 --- a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi @@ -11,6 +11,7 @@ i4_2d: np.ndarray[tuple[int, int], np.dtype[np.int32]] f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] cG_4d: np.ndarray[tuple[int, int, int, int], np.dtype[np.clongdouble]] i0_nd: npt.NDArray[np.int_] +uncertain_dtype: np.int32 | np.float64 | np.str_ # item assert_type(i0_nd.item(), int) @@ -50,6 +51,13 @@ assert_type(i0_nd.astype(np.float64, "K", "unsafe", True, True), npt.NDArray[np. assert_type(np.astype(i0_nd, np.float64), npt.NDArray[np.float64]) +assert_type(i4_2d.astype(np.uint16), np.ndarray[tuple[int, int], np.dtype[np.uint16]]) +assert_type(np.astype(i4_2d, np.uint16), np.ndarray[tuple[int, int], np.dtype[np.uint16]]) +assert_type(f8_3d.astype(np.int16), np.ndarray[tuple[int, int, int], np.dtype[np.int16]]) +assert_type(np.astype(f8_3d, np.int16), np.ndarray[tuple[int, int, int], np.dtype[np.int16]]) +assert_type(i4_2d.astype(uncertain_dtype), np.ndarray[tuple[int, int], np.dtype[np.generic[Any]]]) +assert_type(np.astype(i4_2d, uncertain_dtype), np.ndarray[tuple[int, int], np.dtype[Any]]) + # byteswap assert_type(i0_nd.byteswap(), npt.NDArray[np.int_]) assert_type(i0_nd.byteswap(True), npt.NDArray[np.int_]) From b04e32c15e6b3ad268fe5b314209915e7779764e Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 16 Jan 2025 16:32:52 +0100 Subject: [PATCH 058/187] TYP: Fix missing and spurious top-level exports --- numpy/__init__.pyi | 98 +++++++++++++++--------- numpy/typing/tests/data/fail/modules.pyi | 1 - 2 files changed, 61 insertions(+), 38 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 742bc7442082..ce021af22b68 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1,3 +1,4 @@ +# ruff: noqa: I001 import builtins 
import sys import mmap @@ -206,17 +207,19 @@ else: ) from typing import ( - Literal as L, Any, + ClassVar, + Final, + Generic, + Literal as L, NoReturn, SupportsComplex, SupportsFloat, SupportsInt, SupportsIndex, - Final, - final, - ClassVar, TypeAlias, + TypedDict, + final, type_check_only, ) @@ -225,11 +228,13 @@ from typing import ( # library include `typing_extensions` stubs: # https://github.com/python/typeshed/blob/main/stdlib/typing_extensions.pyi from _typeshed import StrOrBytesPath, SupportsFlush, SupportsLenAndGetItem, SupportsWrite -from typing_extensions import CapsuleType, Generic, LiteralString, Never, Protocol, Self, TypeVar, Unpack, deprecated, overload +from typing_extensions import CapsuleType, LiteralString, Never, Protocol, Self, TypeVar, Unpack, deprecated, overload from numpy import ( + char, core, ctypeslib, + dtypes, exceptions, f2py, fft, @@ -238,15 +243,22 @@ from numpy import ( ma, polynomial, random, - testing, - typing, - version, - dtypes, rec, - char, strings, + testing, + typing, ) +# available through `__getattr__`, but not in `__all__` or `__dir__` +from numpy import ( + __config__ as __config__, + matlib as matlib, + matrixlib as matrixlib, + version as version, +) +if sys.version_info < (3, 12): + from numpy import distutils as distutils + from numpy._core.records import ( record, recarray, @@ -440,6 +452,7 @@ from numpy.lib._arraypad_impl import ( from numpy.lib._arraysetops_impl import ( ediff1d, + in1d, intersect1d, isin, setdiff1d, @@ -481,6 +494,8 @@ from numpy.lib._function_base_impl import ( bartlett, blackman, kaiser, + trapezoid, + trapz, i0, meshgrid, delete, @@ -488,7 +503,6 @@ from numpy.lib._function_base_impl import ( append, interp, quantile, - trapezoid, ) from numpy.lib._histograms_impl import ( @@ -627,13 +641,10 @@ from numpy.matrixlib import ( bmat, ) -__all__ = [ - "emath", "show_config", "version", "__version__", "__array_namespace_info__", - +__all__ = [ # noqa: RUF022 # __numpy_submodules__ - "linalg", 
"fft", "dtypes", "random", "polynomial", "ma", "exceptions", "lib", - "ctypeslib", "testing", "test", "rec", "char", "strings", - "core", "typing", "f2py", + "char", "core", "ctypeslib", "dtypes", "exceptions", "f2py", "fft", "lib", "linalg", + "ma", "polynomial", "random", "rec", "strings", "test", "testing", "typing", # _core.__all__ "abs", "acos", "acosh", "asin", "asinh", "atan", "atanh", "atan2", "bitwise_invert", @@ -651,8 +662,8 @@ __all__ = [ "tensordot", "little_endian", "fromiter", "array_equal", "array_equiv", "indices", "fromfunction", "isclose", "isscalar", "binary_repr", "base_repr", "ones", "identity", "allclose", "putmask", "flatnonzero", "inf", "nan", "False_", "True_", - "bitwise_not", "full", "full_like", "matmul", "vecdot", "shares_memory", - "may_share_memory", "_get_promotion_state", "_set_promotion_state", + "bitwise_not", "full", "full_like", "matmul", "vecdot", "vecmat", + "shares_memory", "may_share_memory", "all", "amax", "amin", "any", "argmax", "argmin", "argpartition", "argsort", "around", "choose", "clip", "compress", "cumprod", "cumsum", "cumulative_prod", "cumulative_sum", "diagonal", "mean", "max", "min", "matrix_transpose", "ndim", @@ -667,7 +678,7 @@ __all__ = [ "frompyfunc", "gcd", "greater", "greater_equal", "heaviside", "hypot", "invert", "isfinite", "isinf", "isnan", "isnat", "lcm", "ldexp", "left_shift", "less", "less_equal", "log", "log10", "log1p", "log2", "logaddexp", "logaddexp2", - "logical_and", "logical_not", "logical_or", "logical_xor", "maximum", "minimum", + "logical_and", "logical_not", "logical_or", "logical_xor", "matvec", "maximum", "minimum", "mod", "modf", "multiply", "negative", "nextafter", "not_equal", "pi", "positive", "power", "rad2deg", "radians", "reciprocal", "remainder", "right_shift", "rint", "sign", "signbit", "sin", "sinh", "spacing", "sqrt", "square", "subtract", "tan", @@ -686,7 +697,7 @@ __all__ = [ "array2string", "array_str", "array_repr", "set_printoptions", "get_printoptions", 
"printoptions", "format_float_positional", "format_float_scientific", "require", "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall", - "errstate", "_no_nep50_warning", + "errstate", # _core.function_base.__all__ "logspace", "linspace", "geomspace", # _core.getlimits.__all__ @@ -696,7 +707,8 @@ __all__ = [ "vstack", # _core.einsumfunc.__all__ "einsum", "einsum_path", - + # matrixlib.__all__ + "matrix", "bmat", "asmatrix", # lib._histograms_impl.__all__ "histogram", "histogramdd", "histogram_bin_edges", # lib._nanfunctions_impl.__all__ @@ -704,13 +716,12 @@ __all__ = [ "nanpercentile", "nanvar", "nanstd", "nanprod", "nancumsum", "nancumprod", "nanquantile", # lib._function_base_impl.__all__ - # NOTE: `trapz` is omitted because it is deprecated "select", "piecewise", "trim_zeros", "copy", "iterable", "percentile", "diff", "gradient", "angle", "unwrap", "sort_complex", "flip", "rot90", "extract", "place", "vectorize", "asarray_chkfinite", "average", "bincount", "digitize", "cov", "corrcoef", "median", "sinc", "hamming", "hanning", "bartlett", "blackman", - "kaiser", "i0", "meshgrid", "delete", "insert", "append", "interp", "quantile", - "trapezoid", + "kaiser", "trapezoid", "trapz", "i0", "meshgrid", "delete", "insert", "append", + "interp", "quantile", # lib._twodim_base_impl.__all__ "diag", "diagflat", "eye", "fliplr", "flipud", "tri", "triu", "tril", "vander", "histogram2d", "mask_indices", "tril_indices", "tril_indices_from", "triu_indices", @@ -724,9 +735,8 @@ __all__ = [ "iscomplexobj", "isrealobj", "imag", "iscomplex", "isreal", "nan_to_num", "real", "real_if_close", "typename", "mintypecode", "common_type", # lib._arraysetops_impl.__all__ - # NOTE: `in1d` is omitted because it is deprecated - "ediff1d", "intersect1d", "isin", "setdiff1d", "setxor1d", "union1d", "unique", - "unique_all", "unique_counts", "unique_inverse", "unique_values", + "ediff1d", "in1d", "intersect1d", "isin", "setdiff1d", "setxor1d", "union1d", + "unique", 
"unique_all", "unique_counts", "unique_inverse", "unique_values", # lib._ufunclike_impl.__all__ "fix", "isneginf", "isposinf", # lib._arraypad_impl.__all__ @@ -746,9 +756,9 @@ __all__ = [ "index_exp", "ix_", "ndenumerate", "ndindex", "fill_diagonal", "diag_indices", "diag_indices_from", - # matrixlib.__all__ - "matrix", "bmat", "asmatrix", -] + # __init__.__all__ + "emath", "show_config", "__version__", "__array_namespace_info__", +] # fmt: skip ### Constrained types (for internal use only) # Only use these for functions; never as generic type parameter. @@ -1047,6 +1057,16 @@ _IntTD64Unit: TypeAlias = L[_MonthUnit, _IntTimeUnit] _TD64Unit: TypeAlias = L[_DateUnit, _TimeUnit] _TimeUnitSpec: TypeAlias = _TD64UnitT | tuple[_TD64UnitT, SupportsIndex] +### TypedDict's (for internal use only) + +@type_check_only +class _FormerAttrsDict(TypedDict): + object: LiteralString + float: LiteralString + complex: LiteralString + str: LiteralString + int: LiteralString + ### Protocols (for internal use only) @type_check_only @@ -1150,22 +1170,26 @@ class _IntegralMixin(_RealMixin): ### Public API __version__: Final[LiteralString] = ... -__array_api_version__: Final = "2023.12" -test: Final[PytestTester] = ... e: Final[float] = ... euler_gamma: Final[float] = ... +pi: Final[float] = ... inf: Final[float] = ... nan: Final[float] = ... -pi: Final[float] = ... - little_endian: Final[builtins.bool] = ... - False_: Final[np.bool[L[False]]] = ... True_: Final[np.bool[L[True]]] = ... - newaxis: Final[None] = None +# not in __all__ +__NUMPY_SETUP__: Final[L[False]] = False +__numpy_submodules__: Final[set[LiteralString]] = ... +__expired_attributes__: Final[dict[LiteralString, LiteralString]] +__former_attrs__: Final[_FormerAttrsDict] = ... +__future_scalars__: Final[set[L["bytes", "str", "object"]]] = ... +__array_api_version__: Final[L["2023.12"]] = "2023.12" +test: Final[PytestTester] = ... + @final class dtype(Generic[_SCT_co]): names: None | tuple[builtins.str, ...] 
diff --git a/numpy/typing/tests/data/fail/modules.pyi b/numpy/typing/tests/data/fail/modules.pyi index c86627e0c8ea..541be15b24ae 100644 --- a/numpy/typing/tests/data/fail/modules.pyi +++ b/numpy/typing/tests/data/fail/modules.pyi @@ -13,6 +13,5 @@ np.math # E: Module has no attribute # e.g. one must first execute `import numpy.lib.recfunctions` np.lib.recfunctions # E: Module has no attribute -np.__NUMPY_SETUP__ # E: Module has no attribute np.__deprecated_attrs__ # E: Module has no attribute np.__expired_functions__ # E: Module has no attribute From 0d106a5162608e04f30688ed9226efdba765b405 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 18 Jan 2025 12:22:51 -0700 Subject: [PATCH 059/187] REL: Prepare for the NumPy 2.2.2 release [wheel build] - Create 2.2.2-changelog.rst. - Update 2.2.2-notes.rst. - Update .mailmap. --- .mailmap | 1 + doc/changelog/2.2.2-changelog.rst | 37 ++++++++++++++++++++ doc/source/release/2.2.2-notes.rst | 54 +++++++++++++++++++++++------- 3 files changed, 80 insertions(+), 12 deletions(-) create mode 100644 doc/changelog/2.2.2-changelog.rst diff --git a/.mailmap b/.mailmap index 4853320b7835..1ae0bce7f11a 100644 --- a/.mailmap +++ b/.mailmap @@ -398,6 +398,7 @@ Julien Schueller Junyan Ou Justus Magin Justus Magin +Kai Germaschewski Kai Striega Kai Striega Kasia Leszek diff --git a/doc/changelog/2.2.2-changelog.rst b/doc/changelog/2.2.2-changelog.rst new file mode 100644 index 000000000000..ac856c97174c --- /dev/null +++ b/doc/changelog/2.2.2-changelog.rst @@ -0,0 +1,37 @@ + +Contributors +============ + +A total of 8 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Alicia Boya García + +* Charles Harris +* Joren Hammudoglu +* Kai Germaschewski + +* Nathan Goldbaum +* PTUsumit + +* Rohit Goswami +* Sebastian Berg + +Pull requests merged +==================== + +A total of 16 pull requests were merged for this release. 
+ +* `#28050 `__: MAINT: Prepare 2.2.x for further development +* `#28055 `__: TYP: fix ``void`` arrays not accepting ``str`` keys in ``__setitem__`` +* `#28066 `__: TYP: fix unnecessarily broad ``integer`` binop return types (#28065) +* `#28112 `__: TYP: Better ``ndarray`` binop return types for ``float64`` &... +* `#28113 `__: TYP: Return the correct ``bool`` from ``issubdtype`` +* `#28114 `__: TYP: Always accept ``date[time]`` in the ``datetime64`` constructor +* `#28120 `__: BUG: Fix auxdata initialization in ufunc slow path +* `#28131 `__: BUG: move reduction initialization to ufunc initialization +* `#28132 `__: TYP: Fix ``interp`` to accept and return scalars +* `#28137 `__: BUG: call PyType_Ready in f2py to avoid data races +* `#28145 `__: BUG: remove unnecessary call to PyArray_UpdateFlags +* `#28160 `__: BUG: Avoid data race in PyArray_CheckFromAny_int +* `#28175 `__: BUG: Fix f2py directives and --lower casing +* `#28176 `__: TYP: Fix overlapping overloads issue in 2->1 ufuncs +* `#28177 `__: TYP: preserve shape-type in ndarray.astype() +* `#28178 `__: TYP: Fix missing and spurious top-level exports diff --git a/doc/source/release/2.2.2-notes.rst b/doc/source/release/2.2.2-notes.rst index 271a16f4c32b..8a3de547ec81 100644 --- a/doc/source/release/2.2.2-notes.rst +++ b/doc/source/release/2.2.2-notes.rst @@ -4,16 +4,46 @@ NumPy 2.2.2 Release Notes ========================== +NumPy 2.2.2 is a patch release that fixes bugs found after the 2.2.1 release. +The number of typing fixes/updates is notable. This release supports Python +versions 3.10-3.13. + + +Contributors +============ + +A total of 8 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. 
+ +* Alicia Boya García + +* Charles Harris +* Joren Hammudoglu +* Kai Germaschewski + +* Nathan Goldbaum +* PTUsumit + +* Rohit Goswami +* Sebastian Berg + + +Pull requests merged +==================== + +A total of 16 pull requests were merged for this release. + +* `#28050 `__: MAINT: Prepare 2.2.x for further development +* `#28055 `__: TYP: fix ``void`` arrays not accepting ``str`` keys in ``__setitem__`` +* `#28066 `__: TYP: fix unnecessarily broad ``integer`` binop return types (#28065) +* `#28112 `__: TYP: Better ``ndarray`` binop return types for ``float64`` &... +* `#28113 `__: TYP: Return the correct ``bool`` from ``issubdtype`` +* `#28114 `__: TYP: Always accept ``date[time]`` in the ``datetime64`` constructor +* `#28120 `__: BUG: Fix auxdata initialization in ufunc slow path +* `#28131 `__: BUG: move reduction initialization to ufunc initialization +* `#28132 `__: TYP: Fix ``interp`` to accept and return scalars +* `#28137 `__: BUG: call PyType_Ready in f2py to avoid data races +* `#28145 `__: BUG: remove unnecessary call to PyArray_UpdateFlags +* `#28160 `__: BUG: Avoid data race in PyArray_CheckFromAny_int +* `#28175 `__: BUG: Fix f2py directives and --lower casing +* `#28176 `__: TYP: Fix overlapping overloads issue in 2->1 ufuncs +* `#28177 `__: TYP: preserve shape-type in ndarray.astype() +* `#28178 `__: TYP: Fix missing and spurious top-level exports -Highlights -========== - -*We'll choose highlights for this release near the end of the release cycle.* - - -.. if release snippets have been incorporated already, uncomment the follow - line (leave the `.. include:: directive) - -.. **Content from release note snippets in doc/release/upcoming_changes:** - -.. 
include:: notes-towncrier.rst From f068f418e0041ab90851f96abacc32c154965b58 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 18 Jan 2025 17:22:29 -0700 Subject: [PATCH 060/187] MAINT: Prepare 2.2.x for further development - Create doc/source/release/2.2.3-notes.rst - Update doc/source/release.rst - Update pavement.py - Update pyproject.toml [skip azp] [skip cirrus] [skip actions] --- doc/source/release.rst | 1 + doc/source/release/2.2.3-notes.rst | 19 +++++++++++++++++++ pavement.py | 2 +- pyproject.toml | 2 +- 4 files changed, 22 insertions(+), 2 deletions(-) create mode 100644 doc/source/release/2.2.3-notes.rst diff --git a/doc/source/release.rst b/doc/source/release.rst index 4c83cd3d1ae4..a22178a055ee 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,7 @@ Release notes .. toctree:: :maxdepth: 2 + 2.2.3 2.2.2 2.2.1 2.2.0 diff --git a/doc/source/release/2.2.3-notes.rst b/doc/source/release/2.2.3-notes.rst new file mode 100644 index 000000000000..3c5a25668c1c --- /dev/null +++ b/doc/source/release/2.2.3-notes.rst @@ -0,0 +1,19 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.2.3 Release Notes +========================== + + +Highlights +========== + +*We'll choose highlights for this release near the end of the release cycle.* + + +.. if release snippets have been incorporated already, uncomment the follow + line (leave the `.. include:: directive) + +.. **Content from release note snippets in doc/release/upcoming_changes:** + +.. 
include:: notes-towncrier.rst diff --git a/pavement.py b/pavement.py index 5b2e5578b08b..6b6a0668b7a1 100644 --- a/pavement.py +++ b/pavement.py @@ -36,7 +36,7 @@ #----------------------------------- # Path to the release notes -RELEASE_NOTES = 'doc/source/release/2.2.2-notes.rst' +RELEASE_NOTES = 'doc/source/release/2.2.3-notes.rst' #------------------------------------------------------- diff --git a/pyproject.toml b/pyproject.toml index 7c6b19d91fe5..b4f39af4d56c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.2.2" +version = "2.2.3" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"} From bea630aa1ff837513eebeeb55558a5545258b6e8 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 20 Jan 2025 11:16:21 -0700 Subject: [PATCH 061/187] BUG: fix data race in a more minimal way --- numpy/_core/src/multiarray/ctors.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index cfa8b9231818..b6a935e419a6 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -1829,12 +1829,18 @@ PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr, { PyObject *obj; if (requires & NPY_ARRAY_NOTSWAPPED) { - if (!in_descr && PyArray_Check(op)) { - in_descr = PyArray_DESCR((PyArrayObject *)op); - Py_INCREF(in_descr); + if (!in_descr && PyArray_Check(op) && + PyArray_ISBYTESWAPPED((PyArrayObject* )op)) { + in_descr = PyArray_DescrNew(PyArray_DESCR((PyArrayObject *)op)); + if (in_descr == NULL) { + return NULL; + } + } + else if (in_descr && !PyArray_ISNBO(in_descr->byteorder)) { + PyArray_DESCR_REPLACE(in_descr); } - if (in_descr) { - PyArray_DESCR_REPLACE_CANONICAL(in_descr); + if (in_descr && in_descr->byteorder != NPY_IGNORE && in_descr->byteorder != NPY_NATIVE) { + in_descr->byteorder = NPY_NATIVE; } } From 
d97e0fe88abdd7e271bf36316c370dc62a09d6bd Mon Sep 17 00:00:00 2001 From: Yakov Danishevsky Date: Tue, 21 Jan 2025 10:17:09 +0200 Subject: [PATCH 062/187] BUG: Fix ``from_float_positional`` errors for huge pads (#28149) This PR adds graceful error handling for np.format_float_positional when provided pad_left or pad_right arguments are too large. * TST: Added tests for correct handling of overflow * BUG: fixed pad_left and pad_right causing overflow if too large * TST: added overflow test and fixed formatting * BUG: fixed overflow checks and simplified error handling * BUG: rewritten excpetion message and fixed overflow check * TST: split test into smaller tests, added large input value * Apply suggestions from code review --------- Co-authored-by: Sebastian Berg --- numpy/_core/src/multiarray/dragon4.c | 64 +++++++------ numpy/_core/tests/test_scalarprint.py | 129 +++++++++++++++++--------- 2 files changed, 121 insertions(+), 72 deletions(-) diff --git a/numpy/_core/src/multiarray/dragon4.c b/numpy/_core/src/multiarray/dragon4.c index 7cd8afbed6d8..b936f4dc213e 100644 --- a/numpy/_core/src/multiarray/dragon4.c +++ b/numpy/_core/src/multiarray/dragon4.c @@ -1615,7 +1615,8 @@ typedef struct Dragon4_Options { * * See Dragon4_Options for description of remaining arguments. 
*/ -static npy_uint32 + +static npy_int32 FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, npy_int32 exponent, char signbit, npy_uint32 mantissaBit, npy_bool hasUnequalMargins, DigitMode digit_mode, @@ -1646,7 +1647,7 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, buffer[pos++] = '-'; has_sign = 1; } - + numDigits = Dragon4(mantissa, exponent, mantissaBit, hasUnequalMargins, digit_mode, cutoff_mode, precision, min_digits, buffer + has_sign, maxPrintLen - has_sign, @@ -1658,14 +1659,14 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, /* if output has a whole number */ if (printExponent >= 0) { /* leave the whole number at the start of the buffer */ - numWholeDigits = printExponent+1; + numWholeDigits = printExponent+1; if (numDigits <= numWholeDigits) { npy_int32 count = numWholeDigits - numDigits; pos += numDigits; - /* don't overflow the buffer */ - if (pos + count > maxPrintLen) { - count = maxPrintLen - pos; + if (count > maxPrintLen - pos) { + PyErr_SetString(PyExc_RuntimeError, "Float formating result too large"); + return -1; } /* add trailing zeros up to the decimal point */ @@ -1767,9 +1768,12 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, pos < maxPrintLen) { /* add trailing zeros up to add_digits length */ /* compute the number of trailing zeros needed */ + npy_int32 count = desiredFractionalDigits - numFractionDigits; - if (pos + count > maxPrintLen) { - count = maxPrintLen - pos; + + if (count > maxPrintLen - pos) { + PyErr_SetString(PyExc_RuntimeError, "Float formating result too large"); + return -1; } numFractionDigits += count; @@ -1802,7 +1806,7 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, } /* add any whitespace padding to right side */ - if (digits_right >= numFractionDigits) { + if (digits_right >= numFractionDigits) { npy_int32 count = digits_right - numFractionDigits; /* in trim_mode DptZeros, if right padding, 
add a space for the . */ @@ -1811,8 +1815,9 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, buffer[pos++] = ' '; } - if (pos + count > maxPrintLen) { - count = maxPrintLen - pos; + if (count > maxPrintLen - pos) { + PyErr_SetString(PyExc_RuntimeError, "Float formating result too large"); + return -1; } for ( ; count > 0; count--) { @@ -1823,14 +1828,16 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, if (digits_left > numWholeDigits + has_sign) { npy_int32 shift = digits_left - (numWholeDigits + has_sign); npy_int32 count = pos; - - if (count + shift > maxPrintLen) { - count = maxPrintLen - shift; + + if (count > maxPrintLen - shift) { + PyErr_SetString(PyExc_RuntimeError, "Float formating result too large"); + return -1; } if (count > 0) { memmove(buffer + shift, buffer, count); } + pos = shift + count; for ( ; shift > 0; shift--) { buffer[shift - 1] = ' '; @@ -1860,7 +1867,7 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, * * See Dragon4_Options for description of remaining arguments. */ -static npy_uint32 +static npy_int32 FormatScientific (char *buffer, npy_uint32 bufferSize, BigInt *mantissa, npy_int32 exponent, char signbit, npy_uint32 mantissaBit, npy_bool hasUnequalMargins, DigitMode digit_mode, @@ -2158,7 +2165,7 @@ PrintInfNan(char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa, * Helper function that takes Dragon4 parameters and options and * calls Dragon4. 
*/ -static npy_uint32 +static npy_int32 Format_floatbits(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, npy_int32 exponent, char signbit, npy_uint32 mantissaBit, npy_bool hasUnequalMargins, Dragon4_Options *opt) @@ -2187,7 +2194,7 @@ Format_floatbits(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, * exponent: 5 bits * mantissa: 10 bits */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IEEE_binary16( npy_half *value, Dragon4_Options *opt) { @@ -2274,7 +2281,7 @@ Dragon4_PrintFloat_IEEE_binary16( * exponent: 8 bits * mantissa: 23 bits */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IEEE_binary32( npy_float32 *value, Dragon4_Options *opt) @@ -2367,7 +2374,7 @@ Dragon4_PrintFloat_IEEE_binary32( * exponent: 11 bits * mantissa: 52 bits */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IEEE_binary64( npy_float64 *value, Dragon4_Options *opt) { @@ -2482,7 +2489,7 @@ typedef struct FloatVal128 { * intbit 1 bit, first u64 * mantissa: 63 bits, first u64 */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_Intel_extended( FloatVal128 value, Dragon4_Options *opt) { @@ -2580,7 +2587,7 @@ Dragon4_PrintFloat_Intel_extended( * system. But numpy defines NPY_FLOAT80, so if we come across it, assume it is * an Intel extended format. 
*/ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_Intel_extended80( npy_float80 *value, Dragon4_Options *opt) { @@ -2604,7 +2611,7 @@ Dragon4_PrintFloat_Intel_extended80( #ifdef HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE /* Intel's 80-bit IEEE extended precision format, 96-bit storage */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_Intel_extended96( npy_float96 *value, Dragon4_Options *opt) { @@ -2628,7 +2635,7 @@ Dragon4_PrintFloat_Intel_extended96( #ifdef HAVE_LDOUBLE_MOTOROLA_EXTENDED_12_BYTES_BE /* Motorola Big-endian equivalent of the Intel-extended 96 fp format */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_Motorola_extended96( npy_float96 *value, Dragon4_Options *opt) { @@ -2665,7 +2672,7 @@ typedef union FloatUnion128 #ifdef HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE /* Intel's 80-bit IEEE extended precision format, 128-bit storage */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_Intel_extended128( npy_float128 *value, Dragon4_Options *opt) { @@ -2694,7 +2701,7 @@ Dragon4_PrintFloat_Intel_extended128( * I am not sure if the arch also supports uint128, and C does not seem to * support int128 literals. So we use uint64 to do manipulation. */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IEEE_binary128( FloatVal128 val128, Dragon4_Options *opt) { @@ -2779,7 +2786,7 @@ Dragon4_PrintFloat_IEEE_binary128( } #if defined(HAVE_LDOUBLE_IEEE_QUAD_LE) -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IEEE_binary128_le( npy_float128 *value, Dragon4_Options *opt) { @@ -2799,7 +2806,7 @@ Dragon4_PrintFloat_IEEE_binary128_le( * This function is untested, very few, if any, architectures implement * big endian IEEE binary128 floating point. 
*/ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IEEE_binary128_be( npy_float128 *value, Dragon4_Options *opt) { @@ -2854,7 +2861,7 @@ Dragon4_PrintFloat_IEEE_binary128_be( * https://gcc.gnu.org/wiki/Ieee128PowerPCA * https://www.ibm.com/support/knowledgecenter/en/ssw_aix_71/com.ibm.aix.genprogc/128bit_long_double_floating-point_datatype.htm */ -static npy_uint32 +static npy_int32 Dragon4_PrintFloat_IBM_double_double( npy_float128 *value, Dragon4_Options *opt) { @@ -3041,6 +3048,7 @@ Dragon4_PrintFloat_IBM_double_double( * which goes up to about 10^4932. The Dragon4_scratch struct provides a string * buffer of this size. */ + #define make_dragon4_typefuncs_inner(Type, npy_type, format) \ \ PyObject *\ diff --git a/numpy/_core/tests/test_scalarprint.py b/numpy/_core/tests/test_scalarprint.py index f47542ef779c..b6872c2b482b 100644 --- a/numpy/_core/tests/test_scalarprint.py +++ b/numpy/_core/tests/test_scalarprint.py @@ -8,7 +8,8 @@ from tempfile import TemporaryFile import numpy as np -from numpy.testing import assert_, assert_equal, assert_raises, IS_MUSL +from numpy.testing import ( + assert_, assert_equal, assert_raises, assert_raises_regex, IS_MUSL) class TestRealScalars: def test_str(self): @@ -260,53 +261,93 @@ def test_dragon4(self): assert_equal(fpos64('324', unique=False, precision=5, fractional=False), "324.00") - def test_dragon4_interface(self): - tps = [np.float16, np.float32, np.float64] + available_float_dtypes = [np.float16, np.float32, np.float64, np.float128]\ + if hasattr(np, 'float128') else [np.float16, np.float32, np.float64] + + @pytest.mark.parametrize("tp", available_float_dtypes) + def test_dragon4_positional_interface(self, tp): # test is flaky for musllinux on np.float128 - if hasattr(np, 'float128') and not IS_MUSL: - tps.append(np.float128) - + if IS_MUSL and tp == np.float128: + pytest.skip("Skipping flaky test of float128 on musllinux") + + fpos = np.format_float_positional + + # test padding + assert_equal(fpos(tp('1.0'), 
pad_left=4, pad_right=4), " 1. ") + assert_equal(fpos(tp('-1.0'), pad_left=4, pad_right=4), " -1. ") + assert_equal(fpos(tp('-10.2'), + pad_left=4, pad_right=4), " -10.2 ") + + # test fixed (non-unique) mode + assert_equal(fpos(tp('1.0'), unique=False, precision=4), "1.0000") + + @pytest.mark.parametrize("tp", available_float_dtypes) + def test_dragon4_positional_interface_trim(self, tp): + # test is flaky for musllinux on np.float128 + if IS_MUSL and tp == np.float128: + pytest.skip("Skipping flaky test of float128 on musllinux") + fpos = np.format_float_positional + # test trimming + # trim of 'k' or '.' only affects non-unique mode, since unique + # mode will not output trailing 0s. + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='k'), + "1.0000") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='.'), + "1.") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='.'), + "1.2" if tp != np.float16 else "1.2002") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='0'), + "1.0") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='0'), + "1.2" if tp != np.float16 else "1.2002") + assert_equal(fpos(tp('1.'), trim='0'), "1.0") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='-'), + "1") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'), + "1.2" if tp != np.float16 else "1.2002") + assert_equal(fpos(tp('1.'), trim='-'), "1") + assert_equal(fpos(tp('1.001'), precision=1, trim='-'), "1") + + @pytest.mark.parametrize("tp", available_float_dtypes) + @pytest.mark.parametrize("pad_val", [10**5, np.iinfo("int32").max]) + def test_dragon4_positional_interface_overflow(self, tp, pad_val): + # test is flaky for musllinux on np.float128 + if IS_MUSL and tp == np.float128: + pytest.skip("Skipping flaky test of float128 on musllinux") + + fpos = np.format_float_positional + + #gh-28068 + with pytest.raises(RuntimeError, + match="Float formating result too large"): + fpos(tp('1.047'), 
unique=False, precision=pad_val) + + with pytest.raises(RuntimeError, + match="Float formating result too large"): + fpos(tp('1.047'), precision=2, pad_left=pad_val) + + with pytest.raises(RuntimeError, + match="Float formating result too large"): + fpos(tp('1.047'), precision=2, pad_right=pad_val) + + @pytest.mark.parametrize("tp", available_float_dtypes) + def test_dragon4_scientific_interface(self, tp): + # test is flaky for musllinux on np.float128 + if IS_MUSL and tp == np.float128: + pytest.skip("Skipping flaky test of float128 on musllinux") + fsci = np.format_float_scientific - for tp in tps: - # test padding - assert_equal(fpos(tp('1.0'), pad_left=4, pad_right=4), " 1. ") - assert_equal(fpos(tp('-1.0'), pad_left=4, pad_right=4), " -1. ") - assert_equal(fpos(tp('-10.2'), - pad_left=4, pad_right=4), " -10.2 ") - - # test exp_digits - assert_equal(fsci(tp('1.23e1'), exp_digits=5), "1.23e+00001") - - # test fixed (non-unique) mode - assert_equal(fpos(tp('1.0'), unique=False, precision=4), "1.0000") - assert_equal(fsci(tp('1.0'), unique=False, precision=4), - "1.0000e+00") - - # test trimming - # trim of 'k' or '.' only affects non-unique mode, since unique - # mode will not output trailing 0s. 
- assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='k'), - "1.0000") - - assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='.'), - "1.") - assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='.'), - "1.2" if tp != np.float16 else "1.2002") - - assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='0'), - "1.0") - assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='0'), - "1.2" if tp != np.float16 else "1.2002") - assert_equal(fpos(tp('1.'), trim='0'), "1.0") - - assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='-'), - "1") - assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'), - "1.2" if tp != np.float16 else "1.2002") - assert_equal(fpos(tp('1.'), trim='-'), "1") - assert_equal(fpos(tp('1.001'), precision=1, trim='-'), "1") + # test exp_digits + assert_equal(fsci(tp('1.23e1'), exp_digits=5), "1.23e+00001") + + # test fixed (non-unique) mode + assert_equal(fsci(tp('1.0'), unique=False, precision=4), + "1.0000e+00") @pytest.mark.skipif(not platform.machine().startswith("ppc64"), reason="only applies to ppc float128 values") From c1864a97978a41955f810f91bef24fc38b3273fd Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 20 Jan 2025 13:26:33 -0700 Subject: [PATCH 063/187] TST: remove unnecessary iters argument from run_threaded helper --- numpy/_core/tests/test_multithreading.py | 5 ++--- numpy/testing/_private/utils.py | 10 +++------- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index b614f2c76385..61ae5a6186f9 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -38,7 +38,7 @@ def f(b): b.wait() return a.sum() - run_threaded(f, NUM_THREADS, max_workers=NUM_THREADS, pass_barrier=True) + run_threaded(f, NUM_THREADS, pass_barrier=True) def test_temp_elision_thread_safety(): @@ -129,8 +129,7 @@ def closure(b): b.wait() np.sum(x) - 
run_threaded(closure, NUM_THREADS, max_workers=NUM_THREADS, - pass_barrier=True) + run_threaded(closure, NUM_THREADS, pass_barrier=True) def test_parallel_flat_iterator(): diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 3c2d398e8a29..4a97ff111cd7 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -2685,7 +2685,7 @@ def _get_glibc_version(): _glibc_older_than = lambda x: (_glibcver != '0.0' and _glibcver < x) -def run_threaded(func, iters=8, pass_count=False, max_workers=8, +def run_threaded(func, max_workers=8, pass_count=False, pass_barrier=False, outer_iterations=1, prepare_args=None): """Runs a function many times in parallel""" @@ -2697,15 +2697,11 @@ def run_threaded(func, iters=8, pass_count=False, max_workers=8, else: args = prepare_args() if pass_barrier: - if max_workers != iters: - raise RuntimeError( - "Must set max_workers equal to the number of " - "iterations to avoid deadlocks.") barrier = threading.Barrier(max_workers) args.append(barrier) if pass_count: - futures = [tpe.submit(func, i, *args) for i in range(iters)] + futures = [tpe.submit(func, i, *args) for i in range(max_workers)] else: - futures = [tpe.submit(func, *args) for _ in range(iters)] + futures = [tpe.submit(func, *args) for _ in range(max_workers)] for f in futures: f.result() From 1451af2b7dde4ab12271663145aae1b0d832a0ba Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 20 Jan 2025 13:26:48 -0700 Subject: [PATCH 064/187] TST: add failing test for multithreaded repeat --- numpy/_core/tests/test_multithreading.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 61ae5a6186f9..2ddca57dbd0b 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -154,3 +154,14 @@ def closure(x, b): y.flat[x] = x run_threaded(closure, pass_barrier=True, 
prepare_args=prepare_args) + + +def test_multithreaded_repeat(): + x0 = np.arange(10) + + def closure(b): + b.wait() + for _ in range(100): + x = np.repeat(x0, 2, axis=0)[::2] + + run_threaded(closure, max_workers=10, pass_barrier=True) From cec8fbfbb8ad4fe3f5255336d35071adfe38c146 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 20 Jan 2025 13:26:57 -0700 Subject: [PATCH 065/187] BUG: avoid data race in PyArray_Repeat --- numpy/_core/src/multiarray/item_selection.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index eadb7cc099d3..fbcc0f7b162c 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -922,16 +922,23 @@ PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) } } + /* Fill in dimensions of new array */ + npy_intp dims[NPY_MAXDIMS] = {0}; + + for (int i = 0; i < PyArray_NDIM(aop); i++) { + dims[i] = PyArray_DIMS(aop)[i]; + } + + dims[axis] = total; + /* Construct new array */ - PyArray_DIMS(aop)[axis] = total; Py_INCREF(PyArray_DESCR(aop)); ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(aop), PyArray_DESCR(aop), PyArray_NDIM(aop), - PyArray_DIMS(aop), + dims, NULL, NULL, 0, (PyObject *)aop); - PyArray_DIMS(aop)[axis] = n; if (ret == NULL) { goto fail; } From ce17d543494327ce5ca3297ea6c206f6e97a08c3 Mon Sep 17 00:00:00 2001 From: Chris Sidebottom Date: Mon, 20 Jan 2025 18:09:41 +0000 Subject: [PATCH 066/187] Use VQSORT_COMPILER_COMPATIBLE to determine if we should use VQSort Previously we copied the compiler detection across from the Highway header, now we don't have to manually keep it up to date. 
--- numpy/_core/src/highway | 2 +- numpy/_core/src/npysort/highway_qsort.dispatch.cpp | 2 -- numpy/_core/src/npysort/highway_qsort.hpp | 11 +++-------- .../src/npysort/highway_qsort_16bit.dispatch.cpp | 2 -- 4 files changed, 4 insertions(+), 13 deletions(-) diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway index 68b0fdebffb1..f2209b911c74 160000 --- a/numpy/_core/src/highway +++ b/numpy/_core/src/highway @@ -1 +1 @@ -Subproject commit 68b0fdebffb14f3b8473fed1c33ce368efc431e7 +Subproject commit f2209b911c74019e85d0b7a7a2833c9a2e1b7995 diff --git a/numpy/_core/src/npysort/highway_qsort.dispatch.cpp b/numpy/_core/src/npysort/highway_qsort.dispatch.cpp index 194a81e2d7e9..645055537d87 100644 --- a/numpy/_core/src/npysort/highway_qsort.dispatch.cpp +++ b/numpy/_core/src/npysort/highway_qsort.dispatch.cpp @@ -1,6 +1,4 @@ #include "highway_qsort.hpp" -#define VQSORT_ONLY_STATIC 1 -#include "hwy/contrib/sort/vqsort-inl.h" #if VQSORT_ENABLED diff --git a/numpy/_core/src/npysort/highway_qsort.hpp b/numpy/_core/src/npysort/highway_qsort.hpp index ba3fe4920594..77cd9f085943 100644 --- a/numpy/_core/src/npysort/highway_qsort.hpp +++ b/numpy/_core/src/npysort/highway_qsort.hpp @@ -1,18 +1,13 @@ #ifndef NUMPY_SRC_COMMON_NPYSORT_HWY_SIMD_QSORT_HPP #define NUMPY_SRC_COMMON_NPYSORT_HWY_SIMD_QSORT_HPP +#define VQSORT_ONLY_STATIC 1 #include "hwy/highway.h" +#include "hwy/contrib/sort/vqsort-inl.h" #include "common.hpp" -// This replicates VQSORT_ENABLED from hwy/contrib/sort/shared-inl.h -// without checking the scalar target as this is not built within the dynamic -// dispatched sources. 
-#if (HWY_COMPILER_MSVC && !HWY_IS_DEBUG_BUILD) || \ - (HWY_ARCH_ARM_V7 && HWY_IS_DEBUG_BUILD) || \ - (HWY_ARCH_ARM_A64 && HWY_COMPILER_GCC_ACTUAL && HWY_IS_ASAN) || \ - (HWY_ARCH_ARM_A64 && HWY_COMPILER_CLANG && \ - (HWY_IS_HWASAN || HWY_IS_MSAN || HWY_IS_TSAN || HWY_IS_ASAN)) +#if !VQSORT_COMPILER_COMPATIBLE #define NPY_DISABLE_HIGHWAY_SORT #endif diff --git a/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp b/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp index d069cb6373d0..d151de2b5e62 100644 --- a/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp +++ b/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp @@ -1,6 +1,4 @@ #include "highway_qsort.hpp" -#define VQSORT_ONLY_STATIC 1 -#include "hwy/contrib/sort/vqsort-inl.h" #include "quicksort.hpp" From 1de016b2f4c2a695b58c5d7ebd109f144c662673 Mon Sep 17 00:00:00 2001 From: Raghuveer Devulapalli Date: Thu, 23 Jan 2025 13:48:26 -0800 Subject: [PATCH 067/187] update highway to latest --- numpy/_core/src/highway | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway index f2209b911c74..0b696633f9ad 160000 --- a/numpy/_core/src/highway +++ b/numpy/_core/src/highway @@ -1 +1 @@ -Subproject commit f2209b911c74019e85d0b7a7a2833c9a2e1b7995 +Subproject commit 0b696633f9ad89497dd5532b55eaa01625ad71ca From 90169141e7d964d6f057b5525794ccb2f3844ee3 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 27 Jan 2025 10:09:02 -0700 Subject: [PATCH 068/187] BUG: Add cpp atomic support (#28234) * BUG: add C++ support to npy_atomic.h * MAINT: delete outdated comment --- numpy/_core/src/common/npy_atomic.h | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/common/npy_atomic.h b/numpy/_core/src/common/npy_atomic.h index 910028dcde7c..f5b41d7068be 100644 --- a/numpy/_core/src/common/npy_atomic.h +++ b/numpy/_core/src/common/npy_atomic.h @@ -9,11 +9,18 @@ #include "numpy/npy_common.h" -#if 
defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L \ +#ifdef __cplusplus + extern "C++" { + #include + } + #define _NPY_USING_STD using namespace std + #define _Atomic(tp) atomic + #define STDC_ATOMICS +#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L \ && !defined(__STDC_NO_ATOMICS__) -// TODO: support C++ atomics as well if this header is ever needed in C++ #include #include + #define _NPY_USING_STD #define STDC_ATOMICS #elif _MSC_VER #include @@ -35,6 +42,7 @@ static inline npy_uint8 npy_atomic_load_uint8(const npy_uint8 *obj) { #ifdef STDC_ATOMICS + _NPY_USING_STD; return (npy_uint8)atomic_load((const _Atomic(uint8_t)*)obj); #elif defined(MSC_ATOMICS) #if defined(_M_X64) || defined(_M_IX86) @@ -50,6 +58,7 @@ npy_atomic_load_uint8(const npy_uint8 *obj) { static inline void* npy_atomic_load_ptr(const void *obj) { #ifdef STDC_ATOMICS + _NPY_USING_STD; return atomic_load((const _Atomic(void *)*)obj); #elif defined(MSC_ATOMICS) #if SIZEOF_VOID_P == 8 @@ -73,6 +82,7 @@ npy_atomic_load_ptr(const void *obj) { static inline void npy_atomic_store_uint8(npy_uint8 *obj, npy_uint8 value) { #ifdef STDC_ATOMICS + _NPY_USING_STD; atomic_store((_Atomic(uint8_t)*)obj, value); #elif defined(MSC_ATOMICS) _InterlockedExchange8((volatile char *)obj, (char)value); @@ -85,6 +95,7 @@ static inline void npy_atomic_store_ptr(void *obj, void *value) { #ifdef STDC_ATOMICS + _NPY_USING_STD; atomic_store((_Atomic(void *)*)obj, value); #elif defined(MSC_ATOMICS) _InterlockedExchangePointer((void * volatile *)obj, (void *)value); From 3e9d196373d3a71353a7e99417e85aea3fcb73d0 Mon Sep 17 00:00:00 2001 From: Matthew Brett Date: Mon, 27 Jan 2025 16:09:52 +0000 Subject: [PATCH 069/187] BLD: Compile fix for clang-cl on WoA Do not try and use x86 FPU instruction on ARM. 
--- numpy/_core/src/multiarray/_multiarray_tests.c.src | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index 5d0d91f1e996..44b262c10f8e 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -1877,7 +1877,8 @@ get_fpu_mode(PyObject *NPY_UNUSED(self), PyObject *args) result = _controlfp(0, 0); return PyLong_FromLongLong(result); } -#elif (defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))) || (defined(_MSC_VER) && defined(__clang__)) +#elif (defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))) \ + || (defined(_MSC_VER) && defined(__clang__) && !defined(__ARM_ARCH)) { unsigned short cw = 0; __asm__("fstcw %w0" : "=m" (cw)); From 3e5f2560d1581e87a0388eaee1244f04672324c1 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 28 Jan 2025 18:08:36 +0100 Subject: [PATCH 070/187] TYP: Avoid upcasting ``float64`` in the set-ops --- numpy/lib/_arraysetops_impl.pyi | 96 +++++-------------- .../typing/tests/data/reveal/arraysetops.pyi | 7 +- 2 files changed, 28 insertions(+), 75 deletions(-) diff --git a/numpy/lib/_arraysetops_impl.pyi b/numpy/lib/_arraysetops_impl.pyi index 3261cdac8cf6..20f2d576bf00 100644 --- a/numpy/lib/_arraysetops_impl.pyi +++ b/numpy/lib/_arraysetops_impl.pyi @@ -10,35 +10,7 @@ from typing import ( from typing_extensions import deprecated import numpy as np -from numpy import ( - generic, - number, - ushort, - ubyte, - uintc, - uint, - ulonglong, - short, - int8, - byte, - intc, - int_, - intp, - longlong, - half, - single, - double, - longdouble, - csingle, - cdouble, - clongdouble, - timedelta64, - datetime64, - object_, - str_, - bytes_, - void, -) +from numpy import generic, number, int8, intp, timedelta64, object_ from numpy._typing import ( ArrayLike, @@ -75,33 +47,17 @@ _NumberType = TypeVar("_NumberType", bound=number[Any]) # Only 
relevant if two or more arguments are parametrized, (e.g. `setdiff1d`) # which could result in, for example, `int64` and `float64`producing a # `number[_64Bit]` array -_SCTNoCast = TypeVar( - "_SCTNoCast", +_EitherSCT = TypeVar( + "_EitherSCT", np.bool, - ushort, - ubyte, - uintc, - uint, - ulonglong, - short, - byte, - intc, - int_, - longlong, - half, - single, - double, - longdouble, - csingle, - cdouble, - clongdouble, - timedelta64, - datetime64, - object_, - str_, - bytes_, - void, -) + np.int8, np.int16, np.int32, np.int64, np.intp, + np.uint8, np.uint16, np.uint32, np.uint64, np.uintp, + np.float16, np.float32, np.float64, np.longdouble, + np.complex64, np.complex128, np.clongdouble, + np.timedelta64, np.datetime64, + np.bytes_, np.str_, np.void, np.object_, + np.integer, np.floating, np.complexfloating, np.character, +) # fmt: skip class UniqueAllResult(NamedTuple, Generic[_SCT]): values: NDArray[_SCT] @@ -339,11 +295,11 @@ def unique_values(x: ArrayLike, /) -> NDArray[Any]: ... @overload def intersect1d( - ar1: _ArrayLike[_SCTNoCast], - ar2: _ArrayLike[_SCTNoCast], + ar1: _ArrayLike[_EitherSCT], + ar2: _ArrayLike[_EitherSCT], assume_unique: bool = ..., return_indices: L[False] = ..., -) -> NDArray[_SCTNoCast]: ... +) -> NDArray[_EitherSCT]: ... @overload def intersect1d( ar1: ArrayLike, @@ -353,11 +309,11 @@ def intersect1d( ) -> NDArray[Any]: ... @overload def intersect1d( - ar1: _ArrayLike[_SCTNoCast], - ar2: _ArrayLike[_SCTNoCast], + ar1: _ArrayLike[_EitherSCT], + ar2: _ArrayLike[_EitherSCT], assume_unique: bool = ..., return_indices: L[True] = ..., -) -> tuple[NDArray[_SCTNoCast], NDArray[intp], NDArray[intp]]: ... +) -> tuple[NDArray[_EitherSCT], NDArray[intp], NDArray[intp]]: ... 
@overload def intersect1d( ar1: ArrayLike, @@ -368,10 +324,10 @@ def intersect1d( @overload def setxor1d( - ar1: _ArrayLike[_SCTNoCast], - ar2: _ArrayLike[_SCTNoCast], + ar1: _ArrayLike[_EitherSCT], + ar2: _ArrayLike[_EitherSCT], assume_unique: bool = ..., -) -> NDArray[_SCTNoCast]: ... +) -> NDArray[_EitherSCT]: ... @overload def setxor1d( ar1: ArrayLike, @@ -400,9 +356,9 @@ def in1d( @overload def union1d( - ar1: _ArrayLike[_SCTNoCast], - ar2: _ArrayLike[_SCTNoCast], -) -> NDArray[_SCTNoCast]: ... + ar1: _ArrayLike[_EitherSCT], + ar2: _ArrayLike[_EitherSCT], +) -> NDArray[_EitherSCT]: ... @overload def union1d( ar1: ArrayLike, @@ -411,10 +367,10 @@ def union1d( @overload def setdiff1d( - ar1: _ArrayLike[_SCTNoCast], - ar2: _ArrayLike[_SCTNoCast], + ar1: _ArrayLike[_EitherSCT], + ar2: _ArrayLike[_EitherSCT], assume_unique: bool = ..., -) -> NDArray[_SCTNoCast]: ... +) -> NDArray[_EitherSCT]: ... @overload def setdiff1d( ar1: ArrayLike, diff --git a/numpy/typing/tests/data/reveal/arraysetops.pyi b/numpy/typing/tests/data/reveal/arraysetops.pyi index 33793f8deebc..eabc7677cde9 100644 --- a/numpy/typing/tests/data/reveal/arraysetops.pyi +++ b/numpy/typing/tests/data/reveal/arraysetops.pyi @@ -2,10 +2,7 @@ from typing import Any import numpy as np import numpy.typing as npt -from numpy.lib._arraysetops_impl import ( - UniqueAllResult, UniqueCountsResult, UniqueInverseResult -) -from numpy._typing import _64Bit +from numpy.lib._arraysetops_impl import UniqueAllResult, UniqueCountsResult, UniqueInverseResult from typing_extensions import assert_type @@ -28,7 +25,7 @@ assert_type(np.intersect1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datet assert_type(np.intersect1d(AR_f8, AR_i8), npt.NDArray[Any]) assert_type( np.intersect1d(AR_f8, AR_f8, return_indices=True), - tuple[npt.NDArray[np.floating[_64Bit]], npt.NDArray[np.intp], npt.NDArray[np.intp]], + tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]], ) assert_type(np.setxor1d(AR_i8, AR_i8), 
npt.NDArray[np.int64]) From d2e94626cb2fd17dc27865551fe560462a87bb15 Mon Sep 17 00:00:00 2001 From: Matthew Brett Date: Mon, 27 Jan 2025 20:51:27 +0000 Subject: [PATCH 071/187] BLD: better fix for clang / ARM compiles The `_M_IX86` and `_M_AMD64` macros defined for Intel installs. `_M_AMD64` defined for AMD and x86_64 installs. See: https://learn.microsoft.com/en-us/cpp/preprocessor/predefined-macros?view=msvc-170 --- numpy/_core/src/multiarray/_multiarray_tests.c.src | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index 44b262c10f8e..fc73a64b19a0 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -1878,7 +1878,8 @@ get_fpu_mode(PyObject *NPY_UNUSED(self), PyObject *args) return PyLong_FromLongLong(result); } #elif (defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))) \ - || (defined(_MSC_VER) && defined(__clang__) && !defined(__ARM_ARCH)) + || (defined(_MSC_VER) && defined(__clang__) && \ + (defined(_M_IX86) || defined(_M_AMD64))) { unsigned short cw = 0; __asm__("fstcw %w0" : "=m" (cw)); From e1b604e771c73bbbe19757d6caedeb1c31f38811 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 1 Feb 2025 19:25:07 +0100 Subject: [PATCH 072/187] TYP: Add missing overloads to ``timedelta64.__divmod__`` --- numpy/__init__.pyi | 21 ++++- numpy/typing/tests/data/reveal/mod.pyi | 126 +++++++++++++++---------- 2 files changed, 95 insertions(+), 52 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 225e63b4d32d..8aa5882e51d2 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4407,6 +4407,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @property def nbytes(self) -> L[8]: ... 
+ # TODO(jorenham): timedelta64(0) constructor @overload def __init__(self, value: _TD64ItemT_co | timedelta64[_TD64ItemT_co], /) -> None: ... @overload @@ -4458,27 +4459,39 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] def __mul__(self, x: float | np.floating[Any] | np.integer[Any] | np.bool, /) -> timedelta64: ... __rmul__ = __mul__ + # TODO(jorenham): unhandled zero division positibility @overload def __mod__(self: timedelta64[None], x: timedelta64, /) -> timedelta64[None]: ... @overload + def __mod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... + @overload def __mod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... @overload def __mod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item]: ... @overload - def __mod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... - @overload def __mod__(self, x: timedelta64[None], /) -> timedelta64[None]: ... @overload - def __mod__(self, x: timedelta64[int], /) -> timedelta64[int]: ... + def __mod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... @overload def __mod__(self, x: timedelta64, /) -> timedelta64: ... __rmod__ = __mod__ # at runtime the outcomes differ, but the type signatures are the same + # keep in sync with __mod__ @overload def __divmod__(self: timedelta64[None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... @overload + def __divmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int]]: ... + @overload def __divmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... @overload + def __divmod__( + self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], / + ) -> tuple[int64, timedelta64[_AnyTD64Item]]: ... + @overload + def __divmod__(self, x: timedelta64[None], /) -> tuple[int64, timedelta64[None]]: ... 
+ @overload + def __divmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload def __divmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... __rdivmod__ = __divmod__ @@ -4508,6 +4521,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ... + # TODO(jorenham): unhandled zero division positibility @overload def __truediv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> float: ... @overload @@ -4523,6 +4537,7 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __rtruediv__(self, a: timedelta64, /) -> float64: ... + # TODO(jorenham): unhandled zero division positibility @overload def __floordiv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> int: ... @overload diff --git a/numpy/typing/tests/data/reveal/mod.pyi b/numpy/typing/tests/data/reveal/mod.pyi index bd7a632b0a24..26c802e89cc4 100644 --- a/numpy/typing/tests/data/reveal/mod.pyi +++ b/numpy/typing/tests/data/reveal/mod.pyi @@ -1,39 +1,63 @@ import datetime as dt -from typing import Any + +from typing_extensions import assert_type import numpy as np import numpy.typing as npt -from numpy._typing import _32Bit, _64Bit +from numpy._typing import _64Bit -from typing_extensions import assert_type +f8: np.float64 +i8: np.int64 +u8: np.uint64 -f8 = np.float64() -i8 = np.int64() -u8 = np.uint64() +f4: np.float32 +i4: np.int32 +u4: np.uint32 -f4 = np.float32() -i4 = np.int32() -u4 = np.uint32() +m: np.timedelta64 +m_nat: np.timedelta64[None] +m_int: np.timedelta64[int] +m_td: np.timedelta64[dt.timedelta] -td = np.timedelta64(0, "D") -b_ = np.bool() +b_: np.bool -b = bool() -f = float() -i = int() +b: bool +i: int +f: float AR_b: npt.NDArray[np.bool] AR_m: npt.NDArray[np.timedelta64] # Time structures -assert_type(td % td, np.timedelta64[dt.timedelta]) -assert_type(AR_m % td, npt.NDArray[np.timedelta64]) 
-assert_type(td % AR_m, npt.NDArray[np.timedelta64]) - -assert_type(divmod(td, td), tuple[np.int64, np.timedelta64]) -assert_type(divmod(AR_m, td), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) -assert_type(divmod(td, AR_m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) +assert_type(m % m, np.timedelta64) +assert_type(m % m_nat, np.timedelta64[None]) +assert_type(m % m_int, np.timedelta64[int | None]) +assert_type(m_nat % m, np.timedelta64[None]) +assert_type(m_int % m_nat, np.timedelta64[None]) +assert_type(m_int % m_int, np.timedelta64[int]) +assert_type(m_int % m_td, np.timedelta64[int]) +assert_type(m_td % m_nat, np.timedelta64[None]) +assert_type(m_td % m_int, np.timedelta64[int]) +assert_type(m_td % m_td, np.timedelta64[dt.timedelta]) + +assert_type(AR_m % m, npt.NDArray[np.timedelta64]) +assert_type(m % AR_m, npt.NDArray[np.timedelta64]) + +assert_type(divmod(m, m), tuple[np.int64, np.timedelta64]) +assert_type(divmod(m, m_nat), tuple[np.int64, np.timedelta64[None]]) +# workaround for https://github.com/microsoft/pyright/issues/9663 +assert_type(m.__divmod__(m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(divmod(m_nat, m), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_int, m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_int, m_int), tuple[np.int64, np.timedelta64[int]]) +assert_type(divmod(m_int, m_td), tuple[np.int64, np.timedelta64[int]]) +assert_type(divmod(m_td, m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_td, m_int), tuple[np.int64, np.timedelta64[int]]) +assert_type(divmod(m_td, m_td), tuple[np.int64, np.timedelta64[dt.timedelta]]) + +assert_type(divmod(AR_m, m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) +assert_type(divmod(m, AR_m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) # Bool @@ -47,11 +71,12 @@ assert_type(b_ % f8, np.float64) assert_type(b_ % AR_b, npt.NDArray[np.int8]) assert_type(divmod(b_, b), 
tuple[np.int8, np.int8]) -assert_type(divmod(b_, i), tuple[np.int_, np.int_]) -assert_type(divmod(b_, f), tuple[np.float64, np.float64]) assert_type(divmod(b_, b_), tuple[np.int8, np.int8]) -assert_type(divmod(b_, i8), tuple[np.int64, np.int64]) -assert_type(divmod(b_, u8), tuple[np.uint64, np.uint64]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(b_.__divmod__(i), tuple[np.int_, np.int_]) +assert_type(b_.__divmod__(f), tuple[np.float64, np.float64]) +assert_type(b_.__divmod__(i8), tuple[np.int64, np.int64]) +assert_type(b_.__divmod__(u8), tuple[np.uint64, np.uint64]) assert_type(divmod(b_, f8), tuple[np.float64, np.float64]) assert_type(divmod(b_, AR_b), tuple[npt.NDArray[np.int8], npt.NDArray[np.int8]]) @@ -77,26 +102,27 @@ assert_type(divmod(AR_b, b_), tuple[npt.NDArray[np.int8], npt.NDArray[np.int8]]) assert_type(i8 % b, np.int64) assert_type(i8 % i8, np.int64) -assert_type(i8 % f, np.floating[_64Bit]) -assert_type(i8 % f8, np.floating[_64Bit]) +assert_type(i8 % f, np.float64 | np.floating[_64Bit]) +assert_type(i8 % f8, np.float64 | np.floating[_64Bit]) assert_type(i4 % i8, np.int64 | np.int32) assert_type(i4 % f8, np.float64 | np.float32) assert_type(i4 % i4, np.int32) assert_type(i4 % f4, np.float32) assert_type(i8 % AR_b, npt.NDArray[np.int64]) -assert_type(divmod(i8, b), tuple[np.signedinteger[_64Bit], np.signedinteger[_64Bit]]) -assert_type(divmod(i8, f), tuple[np.floating[_64Bit], np.floating[_64Bit]]) -assert_type(divmod(i8, i8), tuple[np.signedinteger[_64Bit], np.signedinteger[_64Bit]]) -assert_type(divmod(i8, f8), tuple[np.floating[_64Bit], np.floating[_64Bit]]) -assert_type(divmod(i8, i4), tuple[np.signedinteger[_64Bit], np.signedinteger[_64Bit]] | tuple[np.signedinteger[_32Bit], np.signedinteger[_32Bit]]) -assert_type(divmod(i8, f4), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.floating[_32Bit], np.floating[_32Bit]]) -assert_type(divmod(i4, i4), tuple[np.signedinteger[_32Bit], 
np.signedinteger[_32Bit]]) -assert_type(divmod(i4, f4), tuple[np.floating[_32Bit], np.floating[_32Bit]]) +assert_type(divmod(i8, b), tuple[np.int64, np.int64]) +assert_type(divmod(i8, i4), tuple[np.int64, np.int64] | tuple[np.int32, np.int32]) +assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(i8.__divmod__(f), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float64, np.float64]) +assert_type(i8.__divmod__(f8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float64, np.float64]) +assert_type(divmod(i8, f4), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float32, np.float32]) +assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) +assert_type(divmod(i4, f4), tuple[np.float32, np.float32]) assert_type(divmod(i8, AR_b), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) -assert_type(b % i8, np.signedinteger[_64Bit]) -assert_type(f % i8, np.floating[_64Bit]) +assert_type(b % i8, np.int64) +assert_type(f % i8, np.float64 | np.floating[_64Bit]) assert_type(i8 % i8, np.int64) assert_type(f8 % i8, np.float64) assert_type(i8 % i4, np.int64 | np.int32) @@ -105,21 +131,22 @@ assert_type(i4 % i4, np.int32) assert_type(f4 % i4, np.float32) assert_type(AR_b % i8, npt.NDArray[np.int64]) -assert_type(divmod(b, i8), tuple[np.signedinteger[_64Bit], np.signedinteger[_64Bit]]) -assert_type(divmod(f, i8), tuple[np.floating[_64Bit], np.floating[_64Bit]]) +assert_type(divmod(b, i8), tuple[np.int64, np.int64]) +assert_type(divmod(f, i8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float64, np.float64]) assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) assert_type(divmod(f8, i8), tuple[np.float64, np.float64]) -assert_type(divmod(i4, i8), tuple[np.signedinteger[_64Bit], np.signedinteger[_64Bit]] | tuple[np.signedinteger[_32Bit], np.signedinteger[_32Bit]]) -assert_type(divmod(f4, i8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | 
tuple[np.floating[_32Bit], np.floating[_32Bit]]) -assert_type(divmod(i4, i4), tuple[np.signedinteger[_32Bit], np.signedinteger[_32Bit]]) -assert_type(divmod(f4, i4), tuple[np.floating[_32Bit], np.floating[_32Bit]]) -assert_type(divmod(AR_b, i8), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) +assert_type(divmod(i4, i8), tuple[np.int64, np.int64] | tuple[np.int32, np.int32]) +assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(f4.__divmod__(i8), tuple[np.floating[_64Bit], np.floating[_64Bit]] | tuple[np.float32, np.float32]) +assert_type(f4.__divmod__(i4), tuple[np.float32, np.float32]) +assert_type(AR_b.__divmod__(i8), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) # float assert_type(f8 % b, np.float64) assert_type(f8 % f, np.float64) -assert_type(i8 % f4, np.floating[_64Bit] | np.floating[_32Bit]) +assert_type(i8 % f4, np.floating[_64Bit] | np.float32) assert_type(f4 % f4, np.float32) assert_type(f8 % AR_b, npt.NDArray[np.float64]) @@ -131,15 +158,16 @@ assert_type(divmod(f4, f4), tuple[np.float32, np.float32]) assert_type(divmod(f8, AR_b), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) assert_type(b % f8, np.float64) -assert_type(f % f8, np.float64) +assert_type(f % f8, np.float64) # pyright: ignore[reportAssertTypeFailure] # pyright incorrectly infers `builtins.float` assert_type(f8 % f8, np.float64) assert_type(f8 % f8, np.float64) assert_type(f4 % f4, np.float32) assert_type(AR_b % f8, npt.NDArray[np.float64]) assert_type(divmod(b, f8), tuple[np.float64, np.float64]) -assert_type(divmod(f, f8), tuple[np.float64, np.float64]) assert_type(divmod(f8, f8), tuple[np.float64, np.float64]) -assert_type(divmod(f4, f8), tuple[np.float64, np.float64] | tuple[np.float32, np.float32]) assert_type(divmod(f4, f4), tuple[np.float32, np.float32]) -assert_type(divmod(AR_b, f8), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +# workarounds for 
https://github.com/microsoft/pyright/issues/9663 +assert_type(f8.__rdivmod__(f), tuple[np.float64, np.float64]) +assert_type(f8.__rdivmod__(f4), tuple[np.float64, np.float64]) +assert_type(AR_b.__divmod__(f8), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) From caf0748ca62b923cc63038edb2f3ab75d214aff1 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 1 Feb 2025 19:55:16 +0100 Subject: [PATCH 073/187] TYP: Take zero division into account in ``timedelta64.__[div]mod__`` --- numpy/__init__.pyi | 64 ++++++++++++++++++-------- numpy/typing/tests/data/reveal/mod.pyi | 26 +++++++---- 2 files changed, 62 insertions(+), 28 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 8aa5882e51d2..e354c93ad9cd 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4407,7 +4407,6 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @property def nbytes(self) -> L[8]: ... - # TODO(jorenham): timedelta64(0) constructor @overload def __init__(self, value: _TD64ItemT_co | timedelta64[_TD64ItemT_co], /) -> None: ... @overload @@ -4415,10 +4414,12 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __init__(self: timedelta64[None], value: _NaTValue | None, format: _TimeUnitSpec, /) -> None: ... @overload - def __init__(self: timedelta64[int], value: dt.timedelta, format: _TimeUnitSpec[_IntTimeUnit], /) -> None: ... + def __init__(self: timedelta64[L[0]], value: L[0], format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> None: ... @overload def __init__(self: timedelta64[int], value: _IntLike_co, format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> None: ... @overload + def __init__(self: timedelta64[int], value: dt.timedelta, format: _TimeUnitSpec[_IntTimeUnit], /) -> None: ... 
+ @overload def __init__( self: timedelta64[dt.timedelta], value: dt.timedelta | _IntLike_co, @@ -4459,41 +4460,68 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] def __mul__(self, x: float | np.floating[Any] | np.integer[Any] | np.bool, /) -> timedelta64: ... __rmul__ = __mul__ - # TODO(jorenham): unhandled zero division positibility @overload - def __mod__(self: timedelta64[None], x: timedelta64, /) -> timedelta64[None]: ... + def __mod__(self, x: timedelta64[None | L[0]], /) -> timedelta64[None]: ... @overload - def __mod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... + def __mod__(self: timedelta64[None], x: timedelta64, /) -> timedelta64[None]: ... @overload - def __mod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... + def __mod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... @overload - def __mod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item]: ... + def __mod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... @overload - def __mod__(self, x: timedelta64[None], /) -> timedelta64[None]: ... + def __mod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... @overload def __mod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... @overload def __mod__(self, x: timedelta64, /) -> timedelta64: ... - __rmod__ = __mod__ # at runtime the outcomes differ, but the type signatures are the same + + # the L[0] makes __mod__ non-commutative, which the first two overloads reflect + @overload + def __rmod__(self, x: timedelta64[None], /) -> timedelta64[None]: ... + @overload + def __rmod__(self: timedelta64[None | L[0]], x: timedelta64, /) -> timedelta64[None]: ... + @overload + def __rmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... 
+ @overload + def __rmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... + @overload + def __rmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... + @overload + def __rmod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... + @overload + def __rmod__(self, x: timedelta64, /) -> timedelta64: ... # keep in sync with __mod__ @overload - def __divmod__(self: timedelta64[None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... + def __divmod__(self, x: timedelta64[None | L[0]], /) -> tuple[int64, timedelta64[None]]: ... @overload - def __divmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int]]: ... + def __divmod__(self: timedelta64[None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... @overload - def __divmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... + def __divmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... @overload - def __divmod__( - self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], / - ) -> tuple[int64, timedelta64[_AnyTD64Item]]: ... + def __divmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... @overload - def __divmod__(self, x: timedelta64[None], /) -> tuple[int64, timedelta64[None]]: ... + def __divmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... @overload def __divmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... @overload def __divmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... - __rdivmod__ = __divmod__ + + # keep in sync with __rmod__ + @overload + def __rdivmod__(self, x: timedelta64[None], /) -> tuple[int64, timedelta64[None]]: ... 
+ @overload + def __rdivmod__(self: timedelta64[None | L[0]], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... + @overload + def __rdivmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload + def __rdivmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... + @overload + def __rdivmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... + @overload + def __rdivmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload + def __rdivmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... @overload def __sub__(self: timedelta64[None], b: _TD64Like_co, /) -> timedelta64[None]: ... @@ -4521,7 +4549,6 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ... - # TODO(jorenham): unhandled zero division positibility @overload def __truediv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> float: ... @overload @@ -4537,7 +4564,6 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __rtruediv__(self, a: timedelta64, /) -> float64: ... - # TODO(jorenham): unhandled zero division positibility @overload def __floordiv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> int: ... 
@overload diff --git a/numpy/typing/tests/data/reveal/mod.pyi b/numpy/typing/tests/data/reveal/mod.pyi index 26c802e89cc4..db79504fdd1f 100644 --- a/numpy/typing/tests/data/reveal/mod.pyi +++ b/numpy/typing/tests/data/reveal/mod.pyi @@ -1,4 +1,5 @@ import datetime as dt +from typing import Literal as L from typing_extensions import assert_type @@ -16,6 +17,7 @@ u4: np.uint32 m: np.timedelta64 m_nat: np.timedelta64[None] +m_int0: np.timedelta64[L[0]] m_int: np.timedelta64[int] m_td: np.timedelta64[dt.timedelta] @@ -32,29 +34,35 @@ AR_m: npt.NDArray[np.timedelta64] assert_type(m % m, np.timedelta64) assert_type(m % m_nat, np.timedelta64[None]) +assert_type(m % m_int0, np.timedelta64[None]) assert_type(m % m_int, np.timedelta64[int | None]) assert_type(m_nat % m, np.timedelta64[None]) assert_type(m_int % m_nat, np.timedelta64[None]) -assert_type(m_int % m_int, np.timedelta64[int]) -assert_type(m_int % m_td, np.timedelta64[int]) +assert_type(m_int % m_int0, np.timedelta64[None]) +assert_type(m_int % m_int, np.timedelta64[int | None]) +assert_type(m_int % m_td, np.timedelta64[int | None]) assert_type(m_td % m_nat, np.timedelta64[None]) -assert_type(m_td % m_int, np.timedelta64[int]) -assert_type(m_td % m_td, np.timedelta64[dt.timedelta]) +assert_type(m_td % m_int0, np.timedelta64[None]) +assert_type(m_td % m_int, np.timedelta64[int | None]) +assert_type(m_td % m_td, np.timedelta64[dt.timedelta | None]) assert_type(AR_m % m, npt.NDArray[np.timedelta64]) assert_type(m % AR_m, npt.NDArray[np.timedelta64]) assert_type(divmod(m, m), tuple[np.int64, np.timedelta64]) assert_type(divmod(m, m_nat), tuple[np.int64, np.timedelta64[None]]) -# workaround for https://github.com/microsoft/pyright/issues/9663 +assert_type(divmod(m, m_int0), tuple[np.int64, np.timedelta64[None]]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 assert_type(m.__divmod__(m_int), tuple[np.int64, np.timedelta64[int | None]]) assert_type(divmod(m_nat, m), tuple[np.int64, 
np.timedelta64[None]]) assert_type(divmod(m_int, m_nat), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m_int, m_int), tuple[np.int64, np.timedelta64[int]]) -assert_type(divmod(m_int, m_td), tuple[np.int64, np.timedelta64[int]]) +assert_type(divmod(m_int, m_int0), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_int, m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(divmod(m_int, m_td), tuple[np.int64, np.timedelta64[int | None]]) assert_type(divmod(m_td, m_nat), tuple[np.int64, np.timedelta64[None]]) -assert_type(divmod(m_td, m_int), tuple[np.int64, np.timedelta64[int]]) -assert_type(divmod(m_td, m_td), tuple[np.int64, np.timedelta64[dt.timedelta]]) +assert_type(divmod(m_td, m_int0), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_td, m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(divmod(m_td, m_td), tuple[np.int64, np.timedelta64[dt.timedelta | None]]) assert_type(divmod(AR_m, m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) assert_type(divmod(m, AR_m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) From 696e4a57ac6245581aacb0bcbc3612dde4ecc9d1 Mon Sep 17 00:00:00 2001 From: Your Name Date: Tue, 4 Feb 2025 19:08:51 +0200 Subject: [PATCH 074/187] TYP: Added missing legacy options to set_printoptions' typing information --- numpy/_core/arrayprint.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/arrayprint.pyi b/numpy/_core/arrayprint.pyi index 10728131ba3f..6729b55eecca 100644 --- a/numpy/_core/arrayprint.pyi +++ b/numpy/_core/arrayprint.pyi @@ -65,7 +65,7 @@ def set_printoptions( sign: Literal[None, "-", "+", " "] = ..., floatmode: None | _FloatMode = ..., *, - legacy: Literal[None, False, "1.13", "1.21"] = ..., + legacy: Literal[False, "1.13", "1.21", "1.25", "2.1"] | None = ..., override_repr: None | Callable[[NDArray[Any]], str] = ..., ) -> None: ... def get_printoptions() -> _FormatOptions: ... 
From d585feb81dcf4c434c3e72521c9ddb4e65968e74 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 5 Feb 2025 08:55:57 -0700 Subject: [PATCH 075/187] BUG: backport resource cleanup bugfix from gh-28273 --- numpy/_core/src/umath/ufunc_object.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/umath/ufunc_object.c b/numpy/_core/src/umath/ufunc_object.c index 657330b6f4be..69bb0b1eb197 100644 --- a/numpy/_core/src/umath/ufunc_object.c +++ b/numpy/_core/src/umath/ufunc_object.c @@ -5963,7 +5963,6 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) NPY_AUXDATA_FREE(auxdata); Py_XDECREF(op2_array); - Py_XDECREF(iter); Py_XDECREF(iter2); for (int i = 0; i < nop; i++) { Py_XDECREF(operation_descrs[i]); @@ -5979,9 +5978,13 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) if (PyArray_FLAGS(op1_array) & NPY_ARRAY_WRITEBACKIFCOPY) { PyArray_DiscardWritebackIfCopy(op1_array); } + // iter might own the last refrence to op1_array, + // so it must be decref'd second + Py_XDECREF(iter); return NULL; } else { + Py_XDECREF(iter); Py_RETURN_NONE; } } From c455112ad452e8c683aedf48649384c3591404d2 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 4 Feb 2025 16:23:32 -0700 Subject: [PATCH 076/187] BUG: fix incorrect bytes to stringdtype coercion --- numpy/_core/src/multiarray/stringdtype/dtype.c | 9 +++++++++ numpy/_core/tests/test_stringdtype.py | 14 +++++++++++++- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 81a846bf6d96..cb8265dd3d7a 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -270,6 +270,15 @@ as_pystring(PyObject *scalar, int coerce) "string coercion is disabled."); return NULL; } + else if (scalar_type == &PyBytes_Type) { + // assume UTF-8 encoding + char *buffer; + Py_ssize_t length; + if (PyBytes_AsStringAndSize(scalar, &buffer, &length) < 0) { 
+ return NULL; + } + return PyUnicode_FromStringAndSize(buffer, length); + } else { // attempt to coerce to str scalar = PyObject_Str(scalar); diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index ad4276f40a3e..29b52b27afe8 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -190,10 +190,14 @@ def test_array_creation_utf8(dtype, data): ], ) def test_scalars_string_conversion(data, dtype): + try: + str_vals = [str(d.decode('utf-8')) for d in data] + except AttributeError: + str_vals = [str(d) for d in data] if dtype.coerce: assert_array_equal( np.array(data, dtype=dtype), - np.array([str(d) for d in data], dtype=dtype), + np.array(str_vals, dtype=dtype), ) else: with pytest.raises(ValueError): @@ -284,6 +288,14 @@ def test_bytes_casts(self, dtype, strings): barr = np.array(utf8_bytes, dtype=bytes_dtype) assert_array_equal(barr, sarr.astype(bytes_dtype)) assert_array_equal(barr.astype(dtype), sarr) + if dtype.coerce: + barr = np.array(utf8_bytes, dtype=dtype) + assert_array_equal(barr, sarr) + barr = np.array(utf8_bytes, dtype="O") + assert_array_equal(barr.astype(dtype), sarr) + else: + with pytest.raises(ValueError): + np.array(utf8_bytes, dtype=dtype) except UnicodeEncodeError: with pytest.raises(UnicodeEncodeError): sarr.astype("S20") From 3cfe1fe60b516bff1e084fe08ad7a23faac6641f Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 5 Feb 2025 20:13:12 +0100 Subject: [PATCH 077/187] TYP: Fix scalar constructors --- numpy/__init__.pyi | 42 ++++++++++++++++++++++-- numpy/typing/tests/data/fail/scalars.pyi | 3 +- 2 files changed, 40 insertions(+), 5 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index e354c93ad9cd..2876c52b6c3c 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -4134,6 +4134,9 @@ float32: TypeAlias = floating[_32Bit] # either a C `double`, `float`, or `longdouble` class float64(floating[_64Bit], float): # type: ignore[misc] + def 
__new__(cls, x: _ConvertibleToFloat | None = ..., /) -> Self: ... + + # @property def itemsize(self) -> L[8]: ... @property @@ -4268,7 +4271,15 @@ longdouble: TypeAlias = floating[_NBitLongDouble] # describing the two 64 bit floats representing its real and imaginary component class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): - def __init__(self, value: _ConvertibleToComplex | None = ..., /) -> None: ... + @overload + def __init__( + self, + real: complex | SupportsComplex | SupportsFloat | SupportsIndex = ..., + imag: complex | SupportsFloat | SupportsIndex = ..., + /, + ) -> None: ... + @overload + def __init__(self, real: _ConvertibleToComplex | None = ..., /) -> None: ... @property def real(self) -> floating[_NBit1]: ... # type: ignore[override] @@ -4352,6 +4363,17 @@ class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): complex64: TypeAlias = complexfloating[_32Bit, _32Bit] class complex128(complexfloating[_64Bit, _64Bit], complex): # type: ignore[misc] + @overload + def __new__( + cls, + real: complex | SupportsComplex | SupportsFloat | SupportsIndex = ..., + imag: complex | SupportsFloat | SupportsIndex = ..., + /, + ) -> Self: ... + @overload + def __new__(cls, real: _ConvertibleToComplex | None = ..., /) -> Self: ... + + # @property def itemsize(self) -> L[16]: ... @property @@ -4706,12 +4728,26 @@ class character(flexible[_CharacterItemT_co], Generic[_CharacterItemT_co]): class bytes_(character[bytes], bytes): @overload - def __init__(self, value: object = ..., /) -> None: ... + def __new__(cls, o: object = ..., /) -> Self: ... + @overload + def __new__(cls, s: str, /, encoding: str, errors: str = ...) -> Self: ... + + # + @overload + def __init__(self, o: object = ..., /) -> None: ... @overload - def __init__(self, value: str, /, encoding: str = ..., errors: str = ...) -> None: ... + def __init__(self, s: str, /, encoding: str, errors: str = ...) -> None: ... + + # def __bytes__(self, /) -> bytes: ... 
class str_(character[str], str): + @overload + def __new__(cls, value: object = ..., /) -> Self: ... + @overload + def __new__(cls, value: bytes, /, encoding: str = ..., errors: str = ...) -> Self: ... + + # @overload def __init__(self, value: object = ..., /) -> None: ... @overload diff --git a/numpy/typing/tests/data/fail/scalars.pyi b/numpy/typing/tests/data/fail/scalars.pyi index 5c6ccb177fbb..e847d8d6c45a 100644 --- a/numpy/typing/tests/data/fail/scalars.pyi +++ b/numpy/typing/tests/data/fail/scalars.pyi @@ -28,7 +28,6 @@ np.float32(3j) # E: incompatible type np.float32([1.0, 0.0, 0.0]) # E: incompatible type np.complex64([]) # E: incompatible type -np.complex64(1, 2) # E: Too many arguments # TODO: protocols (can't check for non-existent protocols w/ __getattr__) np.datetime64(0) # E: No overload variant @@ -60,7 +59,7 @@ np.flexible(b"test") # E: Cannot instantiate abstract class np.float64(value=0.0) # E: Unexpected keyword argument np.int64(value=0) # E: Unexpected keyword argument np.uint64(value=0) # E: Unexpected keyword argument -np.complex128(value=0.0j) # E: Unexpected keyword argument +np.complex128(value=0.0j) # E: No overload variant np.str_(value='bob') # E: No overload variant np.bytes_(value=b'test') # E: No overload variant np.void(value=b'test') # E: No overload variant From ac7672279f62df7816b6b56b35298b2e559bc772 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 5 Feb 2025 20:24:57 +0100 Subject: [PATCH 078/187] TYP: stub ``numpy.matlib`` --- numpy/matlib.pyi | 578 ++++++++++++++++++++++++++++++++++++++++++++++ numpy/meson.build | 1 + 2 files changed, 579 insertions(+) create mode 100644 numpy/matlib.pyi diff --git a/numpy/matlib.pyi b/numpy/matlib.pyi new file mode 100644 index 000000000000..67b753a87c32 --- /dev/null +++ b/numpy/matlib.pyi @@ -0,0 +1,578 @@ +from typing import Any, Literal, TypeAlias, TypeVar, overload + +import numpy as np +import numpy.typing as npt + +# ruff: noqa: F401 +from numpy import ( + False_, + ScalarType, + 
True_, + __array_namespace_info__, + __version__, + abs, + absolute, + acos, + acosh, + add, + all, + allclose, + amax, + amin, + angle, + any, + append, + apply_along_axis, + apply_over_axes, + arange, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + argmax, + argmin, + argpartition, + argsort, + argwhere, + around, + array, + array2string, + array_equal, + array_equiv, + array_repr, + array_split, + array_str, + asanyarray, + asarray, + asarray_chkfinite, + ascontiguousarray, + asfortranarray, + asin, + asinh, + asmatrix, + astype, + atan, + atan2, + atanh, + atleast_1d, + atleast_2d, + atleast_3d, + average, + bartlett, + base_repr, + binary_repr, + bincount, + bitwise_and, + bitwise_count, + bitwise_invert, + bitwise_left_shift, + bitwise_not, + bitwise_right_shift, + bitwise_xor, + blackman, + block, + bmat, + bool, + bool_, + broadcast, + broadcast_arrays, + broadcast_shapes, + broadcast_to, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + c_, + can_cast, + cbrt, + ceil, + char, + character, + choose, + clip, + clongdouble, + column_stack, + common_type, + complex64, + complex128, + complex256, + complexfloating, + compress, + concat, + concatenate, + conj, + conjugate, + convolve, + copy, + copysign, + copyto, + core, + corrcoef, + correlate, + cos, + cosh, + count_nonzero, + cov, + cross, + csingle, + ctypeslib, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + datetime64, + datetime_as_string, + datetime_data, + deg2rad, + degrees, + delete, + diag, + diag_indices, + diag_indices_from, + diagflat, + diagonal, + diff, + digitize, + divide, + divmod, + dot, + double, + dsplit, + dstack, + dtype, + dtypes, + e, + ediff1d, + einsum, + einsum_path, + emath, + empty_like, + equal, + errstate, + euler_gamma, + exceptions, + exp, + exp2, + expand_dims, + expm1, + extract, + fabs, + fft, + fill_diagonal, + finfo, + fix, + flatiter, + flatnonzero, + flexible, + flip, + fliplr, + flipud, + float16, + float32, 
+ float64, + float128, + float_power, + floating, + floor, + floor_divide, + fmax, + fmin, + fmod, + format_float_positional, + format_float_scientific, + frexp, + from_dlpack, + frombuffer, + fromfile, + fromfunction, + fromiter, + frompyfunc, + fromregex, + fromstring, + full, + full_like, + gcd, + generic, + genfromtxt, + geomspace, + get_include, + get_printoptions, + getbufsize, + geterr, + geterrcall, + gradient, + greater, + greater_equal, + half, + hamming, + heaviside, + histogram, + histogram2d, + histogram_bin_edges, + histogramdd, + hsplit, + hstack, + hypot, + i0, + iinfo, + imag, + index_exp, + indices, + inexact, + inf, + info, + inner, + insert, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + interp, + intersect1d, + intp, + invert, + is_busday, + isclose, + iscomplex, + iscomplexobj, + isdtype, + isfinite, + isfortran, + isin, + isinf, + isnan, + isnat, + isneginf, + isposinf, + isreal, + isrealobj, + isscalar, + issubdtype, + iterable, + ix_, + kaiser, + kron, + lcm, + ldexp, + left_shift, + less, + less_equal, + lexsort, + lib, + linalg, + linspace, + little_endian, + load, + loadtxt, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + logspace, + long, + longdouble, + longlong, + ma, + mask_indices, + matmul, + matrix, + matrix_transpose, + matvec, + max, + maximum, + may_share_memory, + mean, + median, + memmap, + meshgrid, + mgrid, + min, + min_scalar_type, + minimum, + mintypecode, + mod, + modf, + moveaxis, + multiply, + nan, + nan_to_num, + nanargmax, + nanargmin, + nancumprod, + nancumsum, + nanmax, + nanmean, + nanmedian, + nanmin, + nanpercentile, + nanprod, + nanquantile, + nanstd, + nansum, + nanvar, + ndarray, + ndenumerate, + ndim, + ndindex, + nditer, + negative, + nested_iters, + newaxis, + nextafter, + nonzero, + not_equal, + number, + object_, + ogrid, + ones_like, + outer, + packbits, + pad, + partition, + percentile, + permute_dims, + pi, + 
piecewise, + place, + poly, + poly1d, + polyadd, + polyder, + polydiv, + polyfit, + polyint, + polymul, + polynomial, + polysub, + polyval, + positive, + pow, + power, + printoptions, + prod, + promote_types, + ptp, + put, + put_along_axis, + putmask, + quantile, + r_, + rad2deg, + radians, + random, + ravel, + ravel_multi_index, + real, + real_if_close, + rec, + recarray, + reciprocal, + record, + remainder, + repeat, + require, + reshape, + resize, + result_type, + right_shift, + rint, + roll, + rollaxis, + roots, + rot90, + round, + # row_stack, + s_, + save, + savetxt, + savez, + savez_compressed, + sctypeDict, + searchsorted, + select, + set_printoptions, + setbufsize, + setdiff1d, + seterr, + seterrcall, + setxor1d, + shape, + shares_memory, + short, + show_config, + show_runtime, + sign, + signbit, + signedinteger, + sin, + sinc, + single, + sinh, + size, + sort, + sort_complex, + spacing, + sqrt, + square, + squeeze, + std, + str_, + strings, + subtract, + sum, + swapaxes, + take, + take_along_axis, + tan, + tanh, + tensordot, + test, + testing, + tile, + timedelta64, + trace, + transpose, + trapezoid, + trapz, + tri, + tril, + tril_indices, + tril_indices_from, + trim_zeros, + triu, + triu_indices, + triu_indices_from, + true_divide, + trunc, + typecodes, + typename, + ubyte, + ufunc, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + union1d, + unique, + unique_all, + unique_counts, + unique_inverse, + unique_values, + unpackbits, + unravel_index, + unsignedinteger, + unstack, + unwrap, + ushort, + vander, + var, + vdot, + vecdot, + vecmat, + vectorize, + void, + vsplit, + vstack, + where, + zeros_like, +) +from numpy._typing import _ArrayLike, _DTypeLike + +__all__ = ["rand", "randn", "repmat"] +__all__ += np.__all__ + +### + +_T = TypeVar("_T", bound=np.generic) +_Matrix: TypeAlias = np.matrix[tuple[int, int], np.dtype[_T]] +_Order: TypeAlias = Literal["C", "F"] + +### + +# ruff: noqa: F811 + +# +@overload +def 
empty(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... +@overload +def empty(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +@overload +def empty(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def ones(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... +@overload +def ones(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +@overload +def ones(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def zeros(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... +@overload +def zeros(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +@overload +def zeros(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def identity(n: int, dtype: None = None) -> _Matrix[np.float64]: ... +@overload +def identity(n: int, dtype: _DTypeLike[_T]) -> _Matrix[_T]: ... +@overload +def identity(n: int, dtype: npt.DTypeLike | None = None) -> _Matrix[Any]: ... + +# +@overload +def eye( + n: int, + M: int | None = None, + k: int = 0, + dtype: type[np.float64] | None = ..., + order: _Order = "C", +) -> _Matrix[np.float64]: ... +@overload +def eye(n: int, M: int | None, k: int, dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +@overload +def eye(n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +@overload +def eye(n: int, M: int | None = None, k: int = 0, dtype: npt.DTypeLike = ..., order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def rand(arg: int | tuple[()] | tuple[int] | tuple[int, int], /) -> _Matrix[np.float64]: ... 
+@overload +def rand(arg: int, /, *args: int) -> _Matrix[np.float64]: ... + +# +@overload +def randn(arg: int | tuple[()] | tuple[int] | tuple[int, int], /) -> _Matrix[np.float64]: ... +@overload +def randn(arg: int, /, *args: int) -> _Matrix[np.float64]: ... + +# +@overload +def repmat(a: _Matrix[_T], m: int, n: int) -> _Matrix[_T]: ... +@overload +def repmat(a: _ArrayLike[_T], m: int, n: int) -> npt.NDArray[_T]: ... +@overload +def repmat(a: npt.ArrayLike, m: int, n: int) -> npt.NDArray[Any]: ... diff --git a/numpy/meson.build b/numpy/meson.build index 353f89398ca2..3faebf17d90e 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -289,6 +289,7 @@ python_sources = [ 'dtypes.py', 'dtypes.pyi', 'matlib.py', + 'matlib.pyi', 'py.typed', 'version.pyi', ] From 03adb26b417f86ed38a33960800493b97e1c6ab5 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 5 Feb 2025 20:29:26 +0100 Subject: [PATCH 079/187] TYP: stub ``numpy.testing.overrides`` --- numpy/testing/overrides.pyi | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 numpy/testing/overrides.pyi diff --git a/numpy/testing/overrides.pyi b/numpy/testing/overrides.pyi new file mode 100644 index 000000000000..3fefc3f350da --- /dev/null +++ b/numpy/testing/overrides.pyi @@ -0,0 +1,11 @@ +from collections.abc import Callable, Hashable +from typing import Any + +from typing_extensions import TypeIs + +import numpy as np + +def get_overridable_numpy_ufuncs() -> set[np.ufunc]: ... +def get_overridable_numpy_array_functions() -> set[Callable[..., Any]]: ... +def allows_array_ufunc_override(func: object) -> TypeIs[np.ufunc]: ... +def allows_array_function_override(func: Hashable) -> bool: ... 
From 00000e2aea0f00b9054239bb3d488246779d7319 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 5 Feb 2025 20:31:21 +0100 Subject: [PATCH 080/187] TYP: stub ``numpy.testing._private`` --- numpy/testing/_private/__init__.pyi | 0 numpy/testing/_private/extbuild.pyi | 25 +++++++++++++++++++++++++ 2 files changed, 25 insertions(+) create mode 100644 numpy/testing/_private/__init__.pyi create mode 100644 numpy/testing/_private/extbuild.pyi diff --git a/numpy/testing/_private/__init__.pyi b/numpy/testing/_private/__init__.pyi new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/numpy/testing/_private/extbuild.pyi b/numpy/testing/_private/extbuild.pyi new file mode 100644 index 000000000000..609a45e79d16 --- /dev/null +++ b/numpy/testing/_private/extbuild.pyi @@ -0,0 +1,25 @@ +import pathlib +import types +from collections.abc import Sequence + +__all__ = ["build_and_import_extension", "compile_extension_module"] + +def build_and_import_extension( + modname: str, + functions: Sequence[tuple[str, str, str]], + *, + prologue: str = "", + build_dir: pathlib.Path | None = None, + include_dirs: Sequence[str] = [], + more_init: str = "", +) -> types.ModuleType: ... + +# +def compile_extension_module( + name: str, + builddir: pathlib.Path, + include_dirs: Sequence[str], + source_string: str, + libraries: Sequence[str] = [], + library_dirs: Sequence[str] = [], +) -> pathlib.Path: ... 
From 719ba59e26f944a4567f9c4bdb3fe18c1136a318 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 5 Feb 2025 20:32:55 +0100 Subject: [PATCH 081/187] TYP: stub ``numpy.testing.print_coercion_tables`` --- numpy/testing/print_coercion_tables.pyi | 27 +++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 numpy/testing/print_coercion_tables.pyi diff --git a/numpy/testing/print_coercion_tables.pyi b/numpy/testing/print_coercion_tables.pyi new file mode 100644 index 000000000000..e6430304675e --- /dev/null +++ b/numpy/testing/print_coercion_tables.pyi @@ -0,0 +1,27 @@ +from collections.abc import Iterable +from typing import ClassVar, Generic + +from typing_extensions import Self, TypeVar + +import numpy as np + +_VT_co = TypeVar("_VT_co", default=object, covariant=True) + +# undocumented +class GenericObject(Generic[_VT_co]): + dtype: ClassVar[np.dtype[np.object_]] = ... + v: _VT_co + + def __init__(self, /, v: _VT_co) -> None: ... + def __add__(self, other: object, /) -> Self: ... + def __radd__(self, other: object, /) -> Self: ... + +def print_cancast_table(ntypes: Iterable[str]) -> None: ... +def print_coercion_table( + ntypes: Iterable[str], + inputfirstvalue: int, + inputsecondvalue: int, + firstarray: bool, + use_promote_types: bool = False, +) -> None: ... +def print_new_cast_table(*, can_cast: bool = True, legacy: bool = False, flags: bool = False) -> None: ... 
From 9a37ba02936b23486a8f58c40541f2c42364f1e0 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 28 Jan 2025 18:29:30 +0100 Subject: [PATCH 082/187] CI: Fix PR prefix labeler for `TYP:` --- .github/pr-prefix-labeler.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/pr-prefix-labeler.yml b/.github/pr-prefix-labeler.yml index 4905b502045d..65ed35aa1a11 100644 --- a/.github/pr-prefix-labeler.yml +++ b/.github/pr-prefix-labeler.yml @@ -12,5 +12,5 @@ "REV": "34 - Reversion" "STY": "03 - Maintenance" "TST": "05 - Testing" -"TYP": "static typing" +"TYP": "41 - Static typing" "WIP": "25 - WIP" From a16638b15f2f5b1bedb6df352f1c7b6f8ccad67a Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 28 Jan 2025 18:31:55 +0100 Subject: [PATCH 083/187] CI: Fix typing issue label --- .github/ISSUE_TEMPLATE/typing.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/typing.yml b/.github/ISSUE_TEMPLATE/typing.yml index a35b339e4883..17eedfae1c6c 100644 --- a/.github/ISSUE_TEMPLATE/typing.yml +++ b/.github/ISSUE_TEMPLATE/typing.yml @@ -1,7 +1,7 @@ name: Static Typing description: Report an issue with the NumPy typing hints. 
title: "TYP: " -labels: [Static typing] +labels: [41 - Static typing] body: - type: markdown From d249ff044b2a6ac87c2a542781b9b371c2df3163 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 8 Feb 2025 13:11:19 -0700 Subject: [PATCH 084/187] TYP: Backport typing updates from main Backports of the following PRs: #28295, #28296, #28297, #28298, #28299, #28300, #28301, #28302 #28303 Squashed commits of the following: commit a9267179555caf49c109a13560c3af81b7fc2a7a Author: jorenham Date: Sat Feb 8 16:51:24 2025 +0100 TYP: stub ``lib._datasource`` and fix ``lib._npyio_impl`` commit 443ae333a12abecfad6faa32a55008d2312c7d92 Author: jorenham Date: Sat Feb 8 16:22:58 2025 +0100 TYP: fix and improve ``numpy._core.arrayprint`` commit 3bf77939042838edb417507ec2099951c7ad804b Author: jorenham Date: Sat Feb 8 15:57:32 2025 +0100 TYP: stub ``lib.recfunctions`` commit 80319f25c499a2b87eb271a9e7a81bb0ce3e1f85 Author: jorenham Date: Sat Feb 8 15:52:10 2025 +0100 TYP: stub ``lib.introspect`` commit 1d693e9601aaf9f5bb5a915385cc52a0162efcff Author: jorenham Date: Sat Feb 8 15:49:53 2025 +0100 TYP: stub ``lib.user_array`` and ``lib._user_array_impl`` commit d56f22f263ddf02a01fc15bf576862c51f22addd Author: jorenham Date: Sat Feb 8 15:45:36 2025 +0100 TYP: stub ``numpy.lib._iotools`` commit ca2024aa706a85a5011c2bb55aba29e5d5da40bd Author: jorenham Date: Sat Feb 8 15:39:59 2025 +0100 TYP: stub ``numpy._configtool`` and ``numpy._distributor_init`` commit ec7fdc9ca259b4fedfbb88f606818271fed63ee0 Author: jorenham Date: Sat Feb 8 15:36:46 2025 +0100 TYP: stub ``numpy._expired_attrs_2_0`` commit f2f078c4f81065dcf0855d95067d7c32686e972f Author: jorenham Date: Sat Feb 8 15:30:32 2025 +0100 TYP: stub ``numpy._globals`` --- numpy/__init__.pyi | 11 +- numpy/_configtool.pyi | 1 + numpy/_core/arrayprint.pyi | 188 ++++++-- numpy/_distributor_init.pyi | 1 + numpy/_expired_attrs_2_0.pyi | 63 +++ numpy/_globals.pyi | 15 + numpy/lib/_datasource.pyi | 31 ++ numpy/lib/_iotools.pyi | 106 +++++ 
numpy/lib/_npyio_impl.pyi | 364 ++++++--------- numpy/lib/_user_array_impl.pyi | 220 +++++++++ numpy/lib/introspect.pyi | 3 + numpy/lib/npyio.pyi | 1 + numpy/lib/recfunctions.pyi | 435 ++++++++++++++++++ numpy/lib/user_array.pyi | 1 + numpy/meson.build | 4 + numpy/typing/tests/data/fail/arrayprint.pyi | 14 +- .../typing/tests/data/pass/lib_user_array.py | 22 + numpy/typing/tests/data/pass/recfunctions.py | 162 +++++++ 18 files changed, 1360 insertions(+), 282 deletions(-) create mode 100644 numpy/_configtool.pyi create mode 100644 numpy/_distributor_init.pyi create mode 100644 numpy/_expired_attrs_2_0.pyi create mode 100644 numpy/_globals.pyi create mode 100644 numpy/lib/_datasource.pyi create mode 100644 numpy/lib/_iotools.pyi create mode 100644 numpy/lib/_user_array_impl.pyi create mode 100644 numpy/lib/introspect.pyi create mode 100644 numpy/lib/recfunctions.pyi create mode 100644 numpy/lib/user_array.pyi create mode 100644 numpy/typing/tests/data/pass/lib_user_array.py create mode 100644 numpy/typing/tests/data/pass/recfunctions.py diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 2876c52b6c3c..1a2d6a08bbb1 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -5,7 +5,6 @@ import mmap import ctypes as ct import array as _array import datetime as dt -import enum from abc import abstractmethod from types import EllipsisType, ModuleType, TracebackType, MappingProxyType, GenericAlias from decimal import Decimal @@ -442,6 +441,8 @@ from numpy._core.shape_base import ( unstack, ) +from ._expired_attrs_2_0 import __expired_attributes__ as __expired_attributes__ + from numpy.lib import ( scimath as emath, ) @@ -505,6 +506,8 @@ from numpy.lib._function_base_impl import ( quantile, ) +from numpy._globals import _CopyMode + from numpy.lib._histograms_impl import ( histogram_bin_edges, histogram, @@ -1184,7 +1187,6 @@ newaxis: Final[None] = None # not in __all__ __NUMPY_SETUP__: Final[L[False]] = False __numpy_submodules__: Final[set[LiteralString]] = ... 
-__expired_attributes__: Final[dict[LiteralString, LiteralString]] __former_attrs__: Final[_FormerAttrsDict] = ... __future_scalars__: Final[set[L["bytes", "str", "object"]]] = ... __array_api_version__: Final[L["2023.12"]] = "2023.12" @@ -4912,11 +4914,6 @@ bitwise_right_shift = right_shift permute_dims = transpose pow = power -class _CopyMode(enum.Enum): - ALWAYS: L[True] - IF_NEEDED: L[False] - NEVER: L[2] - class errstate: def __init__( self, diff --git a/numpy/_configtool.pyi b/numpy/_configtool.pyi new file mode 100644 index 000000000000..7e7363e797f3 --- /dev/null +++ b/numpy/_configtool.pyi @@ -0,0 +1 @@ +def main() -> None: ... diff --git a/numpy/_core/arrayprint.pyi b/numpy/_core/arrayprint.pyi index 6729b55eecca..661d58a22fe3 100644 --- a/numpy/_core/arrayprint.pyi +++ b/numpy/_core/arrayprint.pyi @@ -1,42 +1,52 @@ from collections.abc import Callable -from typing import Any, Literal, TypeAlias, TypedDict, SupportsIndex, type_check_only # Using a private class is by no means ideal, but it is simply a consequence # of a `contextlib.context` returning an instance of aforementioned class from contextlib import _GeneratorContextManager +from typing import Any, Final, Literal, SupportsIndex, TypeAlias, TypedDict, overload, type_check_only + +from typing_extensions import deprecated import numpy as np -from numpy import ( - integer, - timedelta64, - datetime64, - floating, - complexfloating, - void, - longdouble, - clongdouble, -) from numpy._typing import NDArray, _CharLike_co, _FloatLike_co +__all__ = [ + "array2string", + "array_repr", + "array_str", + "format_float_positional", + "format_float_scientific", + "get_printoptions", + "printoptions", + "set_printoptions", +] + +### + _FloatMode: TypeAlias = Literal["fixed", "unique", "maxprec", "maxprec_equal"] +_LegacyNoStyle: TypeAlias = Literal["1.21", "1.25", "2.1", False] +_Legacy: TypeAlias = Literal["1.13", _LegacyNoStyle] +_Sign: TypeAlias = Literal["-", "+", " "] +_Trim: TypeAlias = Literal["k", ".", 
"0", "-"] +_ReprFunc: TypeAlias = Callable[[NDArray[Any]], str] @type_check_only class _FormatDict(TypedDict, total=False): bool: Callable[[np.bool], str] - int: Callable[[integer[Any]], str] - timedelta: Callable[[timedelta64], str] - datetime: Callable[[datetime64], str] - float: Callable[[floating[Any]], str] - longfloat: Callable[[longdouble], str] - complexfloat: Callable[[complexfloating[Any, Any]], str] - longcomplexfloat: Callable[[clongdouble], str] - void: Callable[[void], str] + int: Callable[[np.integer], str] + timedelta: Callable[[np.timedelta64], str] + datetime: Callable[[np.datetime64], str] + float: Callable[[np.floating], str] + longfloat: Callable[[np.longdouble], str] + complexfloat: Callable[[np.complexfloating], str] + longcomplexfloat: Callable[[np.clongdouble], str] + void: Callable[[np.void], str] numpystr: Callable[[_CharLike_co], str] object: Callable[[object], str] all: Callable[[object], str] - int_kind: Callable[[integer[Any]], str] - float_kind: Callable[[floating[Any]], str] - complex_kind: Callable[[complexfloating[Any, Any]], str] + int_kind: Callable[[np.integer], str] + float_kind: Callable[[np.floating], str] + complex_kind: Callable[[np.complexfloating], str] str_kind: Callable[[_CharLike_co], str] @type_check_only @@ -48,10 +58,14 @@ class _FormatOptions(TypedDict): suppress: bool nanstr: str infstr: str - formatter: None | _FormatDict - sign: Literal["-", "+", " "] + formatter: _FormatDict | None + sign: _Sign floatmode: _FloatMode - legacy: Literal[False, "1.13", "1.21"] + legacy: _Legacy + +### + +__docformat__: Final = "restructuredtext" # undocumented def set_printoptions( precision: None | SupportsIndex = ..., @@ -62,37 +76,112 @@ def set_printoptions( nanstr: None | str = ..., infstr: None | str = ..., formatter: None | _FormatDict = ..., - sign: Literal[None, "-", "+", " "] = ..., - floatmode: None | _FloatMode = ..., + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, *, - legacy: Literal[False, 
"1.13", "1.21", "1.25", "2.1"] | None = ..., - override_repr: None | Callable[[NDArray[Any]], str] = ..., + legacy: _Legacy | None = None, + override_repr: _ReprFunc | None = None, ) -> None: ... def get_printoptions() -> _FormatOptions: ... + +# public numpy export +@overload # no style def array2string( a: NDArray[Any], - max_line_width: None | int = ..., - precision: None | SupportsIndex = ..., - suppress_small: None | bool = ..., - separator: str = ..., - prefix: str = ..., - # NOTE: With the `style` argument being deprecated, - # all arguments between `formatter` and `suffix` are de facto - # keyworld-only arguments + max_line_width: int | None = None, + precision: SupportsIndex | None = None, + suppress_small: bool | None = None, + separator: str = " ", + prefix: str = "", *, - formatter: None | _FormatDict = ..., - threshold: None | int = ..., - edgeitems: None | int = ..., - sign: Literal[None, "-", "+", " "] = ..., - floatmode: None | _FloatMode = ..., - suffix: str = ..., - legacy: Literal[None, False, "1.13", "1.21"] = ..., + formatter: _FormatDict | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + suffix: str = "", + legacy: _Legacy | None = None, ) -> str: ... +@overload # style= (positional), legacy="1.13" +def array2string( + a: NDArray[Any], + max_line_width: int | None, + precision: SupportsIndex | None, + suppress_small: bool | None, + separator: str, + prefix: str, + style: _ReprFunc, + formatter: _FormatDict | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + suffix: str = "", + *, + legacy: Literal["1.13"], +) -> str: ... 
+@overload # style= (keyword), legacy="1.13"
+def array2string(
+    a: NDArray[Any],
+    max_line_width: int | None = None,
+    precision: SupportsIndex | None = None,
+    suppress_small: bool | None = None,
+    separator: str = " ",
+    prefix: str = "",
+    *,
+    style: _ReprFunc,
+    formatter: _FormatDict | None = None,
+    threshold: int | None = None,
+    edgeitems: int | None = None,
+    sign: _Sign | None = None,
+    floatmode: _FloatMode | None = None,
+    suffix: str = "",
+    legacy: Literal["1.13"],
+) -> str: ...
+@overload # style= (positional), legacy!="1.13"
+@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode")
+def array2string(
+    a: NDArray[Any],
+    max_line_width: int | None,
+    precision: SupportsIndex | None,
+    suppress_small: bool | None,
+    separator: str,
+    prefix: str,
+    style: _ReprFunc,
+    formatter: _FormatDict | None = None,
+    threshold: int | None = None,
+    edgeitems: int | None = None,
+    sign: _Sign | None = None,
+    floatmode: _FloatMode | None = None,
+    suffix: str = "",
+    *,
+    legacy: _LegacyNoStyle | None = None,
+) -> str: ...
+@overload # style= (keyword), legacy!="1.13"
+@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode")
+def array2string(
+    a: NDArray[Any],
+    max_line_width: int | None = None,
+    precision: SupportsIndex | None = None,
+    suppress_small: bool | None = None,
+    separator: str = " ",
+    prefix: str = "",
+    *,
+    style: _ReprFunc,
+    formatter: _FormatDict | None = None,
+    threshold: int | None = None,
+    edgeitems: int | None = None,
+    sign: _Sign | None = None,
+    floatmode: _FloatMode | None = None,
+    suffix: str = "",
+    legacy: _LegacyNoStyle | None = None,
+) -> str: ...
+ def format_float_scientific( x: _FloatLike_co, precision: None | int = ..., unique: bool = ..., - trim: Literal["k", ".", "0", "-"] = ..., + trim: _Trim = "k", sign: bool = ..., pad_left: None | int = ..., exp_digits: None | int = ..., @@ -103,7 +192,7 @@ def format_float_positional( precision: None | int = ..., unique: bool = ..., fractional: bool = ..., - trim: Literal["k", ".", "0", "-"] = ..., + trim: _Trim = "k", sign: bool = ..., pad_left: None | int = ..., pad_right: None | int = ..., @@ -130,8 +219,9 @@ def printoptions( nanstr: None | str = ..., infstr: None | str = ..., formatter: None | _FormatDict = ..., - sign: Literal[None, "-", "+", " "] = ..., - floatmode: None | _FloatMode = ..., + sign: None | _Sign = None, + floatmode: _FloatMode | None = None, *, - legacy: Literal[None, False, "1.13", "1.21"] = ... + legacy: _Legacy | None = None, + override_repr: _ReprFunc | None = None, ) -> _GeneratorContextManager[_FormatOptions]: ... diff --git a/numpy/_distributor_init.pyi b/numpy/_distributor_init.pyi new file mode 100644 index 000000000000..94456aba2bcf --- /dev/null +++ b/numpy/_distributor_init.pyi @@ -0,0 +1 @@ +# intentionally left blank diff --git a/numpy/_expired_attrs_2_0.pyi b/numpy/_expired_attrs_2_0.pyi new file mode 100644 index 000000000000..05c630c9b767 --- /dev/null +++ b/numpy/_expired_attrs_2_0.pyi @@ -0,0 +1,63 @@ +from typing import Final, TypedDict, final, type_check_only + +@final +@type_check_only +class _ExpiredAttributesType(TypedDict): + geterrobj: str + seterrobj: str + cast: str + source: str + lookfor: str + who: str + fastCopyAndTranspose: str + set_numeric_ops: str + NINF: str + PINF: str + NZERO: str + PZERO: str + add_newdoc: str + add_docstring: str + add_newdoc_ufunc: str + compat: str + safe_eval: str + float_: str + complex_: str + longfloat: str + singlecomplex: str + cfloat: str + longcomplex: str + clongfloat: str + string_: str + unicode_: str + Inf: str + Infinity: str + NaN: str + infty: str + issctype: str + 
maximum_sctype: str
+    obj2sctype: str
+    sctype2char: str
+    sctypes: str
+    issubsctype: str
+    set_string_function: str
+    asfarray: str
+    issubclass_: str
+    tracemalloc_domain: str
+    mat: str
+    recfromcsv: str
+    recfromtxt: str
+    deprecate: str
+    deprecate_with_doc: str
+    disp: str
+    find_common_type: str
+    round_: str
+    get_array_wrap: str
+    DataSource: str
+    nbytes: str
+    byte_bounds: str
+    compare_chararrays: str
+    format_parser: str
+    alltrue: str
+    sometrue: str
+
+__expired_attributes__: Final[_ExpiredAttributesType] = ...
diff --git a/numpy/_globals.pyi b/numpy/_globals.pyi
new file mode 100644
index 000000000000..c6b17d68d6b2
--- /dev/null
+++ b/numpy/_globals.pyi
@@ -0,0 +1,15 @@
+__all__ = ["_CopyMode", "_NoValue"]
+
+import enum
+from typing import Final, final
+
+@final
+class _CopyMode(enum.Enum):
+    ALWAYS = True
+    IF_NEEDED = False
+    NEVER = 2
+
+@final
+class _NoValueType: ...
+
+_NoValue: Final[_NoValueType] = ...
diff --git a/numpy/lib/_datasource.pyi b/numpy/lib/_datasource.pyi
new file mode 100644
index 000000000000..9f91fdf893a0
--- /dev/null
+++ b/numpy/lib/_datasource.pyi
@@ -0,0 +1,31 @@
+from pathlib import Path
+from typing import IO, Any, TypeAlias
+
+from _typeshed import OpenBinaryMode, OpenTextMode
+
+_Mode: TypeAlias = OpenBinaryMode | OpenTextMode
+
+###
+
+# exported in numpy.lib.npyio
+class DataSource:
+    def __init__(self, /, destpath: Path | str | None = ...) -> None: ...
+    def __del__(self, /) -> None: ...
+    def abspath(self, /, path: str) -> str: ...
+    def exists(self, /, path: str) -> bool: ...
+
+    # Whether the file-object is opened in string or bytes mode (by default)
+    # depends on the file-extension of `path`
+    def open(self, /, path: str, mode: _Mode = "r", encoding: str | None = None, newline: str | None = None) -> IO[Any]: ...
+
+class Repository(DataSource):
+    def __init__(self, /, baseurl: str, destpath: str | None = ...) -> None: ...
+    def listdir(self, /) -> list[str]: ...
+ +def open( + path: str, + mode: _Mode = "r", + destpath: str | None = ..., + encoding: str | None = None, + newline: str | None = None, +) -> IO[Any]: ... diff --git a/numpy/lib/_iotools.pyi b/numpy/lib/_iotools.pyi new file mode 100644 index 000000000000..c1591b1a0251 --- /dev/null +++ b/numpy/lib/_iotools.pyi @@ -0,0 +1,106 @@ +from collections.abc import Callable, Iterable, Sequence +from typing import Any, ClassVar, Final, Literal, TypedDict, overload, type_check_only + +from typing_extensions import TypeVar, Unpack + +import numpy as np +import numpy.typing as npt + +_T = TypeVar("_T") + +@type_check_only +class _ValidationKwargs(TypedDict, total=False): + excludelist: Iterable[str] | None + deletechars: Iterable[str] | None + case_sensitive: Literal["upper", "lower"] | bool | None + replace_space: str + +### + +__docformat__: Final[str] = "restructuredtext en" + +class ConverterError(Exception): ... +class ConverterLockError(ConverterError): ... +class ConversionWarning(UserWarning): ... + +class LineSplitter: + delimiter: str | int | Iterable[int] | None + comments: str + encoding: str | None + + def __init__( + self, + /, + delimiter: str | bytes | int | Iterable[int] | None = None, + comments: str | bytes = "#", + autostrip: bool = True, + encoding: str | None = None, + ) -> None: ... + def __call__(self, /, line: str | bytes) -> list[str]: ... + def autostrip(self, /, method: Callable[[_T], Iterable[str]]) -> Callable[[_T], list[str]]: ... + +class NameValidator: + defaultexcludelist: ClassVar[Sequence[str]] + defaultdeletechars: ClassVar[Sequence[str]] + excludelist: list[str] + deletechars: set[str] + case_converter: Callable[[str], str] + replace_space: str + + def __init__( + self, + /, + excludelist: Iterable[str] | None = None, + deletechars: Iterable[str] | None = None, + case_sensitive: Literal["upper", "lower"] | bool | None = None, + replace_space: str = "_", + ) -> None: ... 
+ def __call__(self, /, names: Iterable[str], defaultfmt: str = "f%i", nbfields: int | None = None) -> tuple[str, ...]: ... + def validate(self, /, names: Iterable[str], defaultfmt: str = "f%i", nbfields: int | None = None) -> tuple[str, ...]: ... + +class StringConverter: + func: Callable[[str], Any] | None + default: Any + missing_values: set[str] + type: np.dtype[np.datetime64] | np.generic + + def __init__( + self, + /, + dtype_or_func: npt.DTypeLike | None = None, + default: None = None, + missing_values: Iterable[str] | None = None, + locked: bool = False, + ) -> None: ... + def update( + self, + /, + func: Callable[[str], Any], + default: object | None = None, + testing_value: str | None = None, + missing_values: str = "", + locked: bool = False, + ) -> None: ... + # + def __call__(self, /, value: str) -> Any: ... + def upgrade(self, /, value: str) -> Any: ... + def iterupgrade(self, /, value: Iterable[str] | str) -> None: ... + + # + @classmethod + def upgrade_mapper(cls, func: Callable[[str], Any], default: object | None = None) -> None: ... + +@overload +def str2bool(value: Literal["false", "False", "FALSE"]) -> Literal[False]: ... +@overload +def str2bool(value: Literal["true", "True", "TRUE"]) -> Literal[True]: ... + +# +def has_nested_fields(ndtype: np.dtype[np.void]) -> bool: ... +def flatten_dtype(ndtype: np.dtype[np.void], flatten_base: bool = False) -> type[np.dtype[Any]]: ... +def easy_dtype( + ndtype: npt.DTypeLike, + names: Iterable[str] | None = None, + defaultfmt: str = "f%i", + **validationargs: Unpack[_ValidationKwargs], +) -> np.dtype[np.void]: ... 
diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index 2ab86575601c..19257a802d44 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -1,359 +1,285 @@ -import zipfile import types -from _typeshed import StrOrBytesPath, StrPath, SupportsRead, SupportsWrite, SupportsKeysAndGetItem +import zipfile +from collections.abc import Callable, Collection, Iterable, Iterator, Mapping, Sequence from re import Pattern -from collections.abc import Collection, Mapping, Iterator, Sequence, Callable, Iterable -from typing import ( - Literal as L, - Any, - TypeVar, - Generic, - IO, - overload, - Protocol, - type_check_only, -) -from typing_extensions import deprecated +from typing import IO, Any, ClassVar, Generic, Protocol, TypeAlias, overload, type_check_only +from typing import Literal as L -from numpy import ( - recarray, - dtype, - generic, - float64, - void, - record, -) -from numpy.ma.mrecords import MaskedRecords +from _typeshed import StrOrBytesPath, StrPath, SupportsKeysAndGetItem, SupportsRead, SupportsWrite +from typing_extensions import Self, TypeVar, deprecated, override + +import numpy as np from numpy._core.multiarray import packbits, unpackbits -from numpy._typing import ( - ArrayLike, - DTypeLike, - NDArray, - _DTypeLike, - _SupportsArrayFunc, -) +from numpy._typing import ArrayLike, DTypeLike, NDArray, _DTypeLike, _SupportsArrayFunc +from numpy.ma.mrecords import MaskedRecords + +from ._datasource import DataSource as DataSource __all__ = [ - "savetxt", - "loadtxt", + "fromregex", "genfromtxt", "load", + "loadtxt", + "packbits", "save", + "savetxt", "savez", "savez_compressed", - "packbits", "unpackbits", - "fromregex", ] -_T = TypeVar("_T") -_T_contra = TypeVar("_T_contra", contravariant=True) _T_co = TypeVar("_T_co", covariant=True) -_SCT = TypeVar("_SCT", bound=generic) +_SCT = TypeVar("_SCT", bound=np.generic) +_SCT_co = TypeVar("_SCT_co", bound=np.generic, default=Any, covariant=True) + +_FName: TypeAlias = StrPath | 
Iterable[str] | Iterable[bytes] +_FNameRead: TypeAlias = StrPath | SupportsRead[str] | SupportsRead[bytes] +_FNameWriteBytes: TypeAlias = StrPath | SupportsWrite[bytes] +_FNameWrite: TypeAlias = _FNameWriteBytes | SupportsWrite[bytes] @type_check_only class _SupportsReadSeek(SupportsRead[_T_co], Protocol[_T_co]): def seek(self, offset: int, whence: int, /) -> object: ... class BagObj(Generic[_T_co]): - def __init__(self, obj: SupportsKeysAndGetItem[str, _T_co]) -> None: ... - def __getattribute__(self, key: str) -> _T_co: ... + def __init__(self, /, obj: SupportsKeysAndGetItem[str, _T_co]) -> None: ... + def __getattribute__(self, key: str, /) -> _T_co: ... def __dir__(self) -> list[str]: ... -class NpzFile(Mapping[str, NDArray[Any]]): +class NpzFile(Mapping[str, NDArray[_SCT_co]]): + _MAX_REPR_ARRAY_COUNT: ClassVar[int] = 5 + zip: zipfile.ZipFile - fid: None | IO[str] + fid: IO[str] | None files: list[str] allow_pickle: bool - pickle_kwargs: None | Mapping[str, Any] - _MAX_REPR_ARRAY_COUNT: int - # Represent `f` as a mutable property so we can access the type of `self` - @property - def f(self: _T) -> BagObj[_T]: ... - @f.setter - def f(self: _T, value: BagObj[_T]) -> None: ... + pickle_kwargs: Mapping[str, Any] | None + f: BagObj[NpzFile[_SCT_co]] + + # def __init__( self, - fid: IO[str], - own_fid: bool = ..., - allow_pickle: bool = ..., - pickle_kwargs: None | Mapping[str, Any] = ..., - ) -> None: ... - def __enter__(self: _T) -> _T: ... - def __exit__( - self, - exc_type: None | type[BaseException], - exc_value: None | BaseException, - traceback: None | types.TracebackType, /, + fid: IO[Any], + own_fid: bool = False, + allow_pickle: bool = False, + pickle_kwargs: Mapping[str, object] | None = None, + *, + max_header_size: int = 10_000, ) -> None: ... - def close(self) -> None: ... def __del__(self) -> None: ... - def __iter__(self) -> Iterator[str]: ... + def __enter__(self) -> Self: ... 
+ def __exit__(self, cls: type[BaseException] | None, e: BaseException | None, tb: types.TracebackType | None, /) -> None: ... + @override def __len__(self) -> int: ... - def __getitem__(self, key: str) -> NDArray[Any]: ... - def __contains__(self, key: str) -> bool: ... - def __repr__(self) -> str: ... - -class DataSource: - def __init__(self, destpath: StrPath | None = ...) -> None: ... - def __del__(self) -> None: ... - def abspath(self, path: str) -> str: ... - def exists(self, path: str) -> bool: ... - - # Whether the file-object is opened in string or bytes mode (by default) - # depends on the file-extension of `path` - def open( - self, - path: str, - mode: str = ..., - encoding: None | str = ..., - newline: None | str = ..., - ) -> IO[Any]: ... + @override + def __iter__(self) -> Iterator[str]: ... + @override + def __getitem__(self, key: str, /) -> NDArray[_SCT_co]: ... + def close(self) -> None: ... # NOTE: Returns a `NpzFile` if file is a zip file; # returns an `ndarray`/`memmap` otherwise def load( file: StrOrBytesPath | _SupportsReadSeek[bytes], - mmap_mode: L[None, "r+", "r", "w+", "c"] = ..., - allow_pickle: bool = ..., - fix_imports: bool = ..., - encoding: L["ASCII", "latin1", "bytes"] = ..., + mmap_mode: L["r+", "r", "w+", "c"] | None = None, + allow_pickle: bool = False, + fix_imports: bool = True, + encoding: L["ASCII", "latin1", "bytes"] = "ASCII", + *, + max_header_size: int = 10_000, ) -> Any: ... @overload -def save( - file: StrPath | SupportsWrite[bytes], - arr: ArrayLike, - allow_pickle: bool = ..., -) -> None: ... +def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool = True) -> None: ... @overload @deprecated("The 'fix_imports' flag is deprecated in NumPy 2.1.") -def save( - file: StrPath | SupportsWrite[bytes], - arr: ArrayLike, - allow_pickle: bool = ..., - *, - fix_imports: bool, -) -> None: ... +def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool, fix_imports: bool) -> None: ... 
@overload @deprecated("The 'fix_imports' flag is deprecated in NumPy 2.1.") -def save( - file: StrPath | SupportsWrite[bytes], - arr: ArrayLike, - allow_pickle: bool, - fix_imports: bool, -) -> None: ... +def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool = True, *, fix_imports: bool) -> None: ... -def savez( - file: StrPath | SupportsWrite[bytes], - *args: ArrayLike, - allow_pickle: bool = ..., - **kwds: ArrayLike, -) -> None: ... +# +def savez(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ... -def savez_compressed( - file: StrPath | SupportsWrite[bytes], - *args: ArrayLike, - allow_pickle: bool = ..., - **kwds: ArrayLike, -) -> None: ... +# +def savez_compressed(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ... # File-like objects only have to implement `__iter__` and, # optionally, `encoding` @overload def loadtxt( - fname: StrPath | Iterable[str] | Iterable[bytes], - dtype: None = ..., - comments: None | str | Sequence[str] = ..., - delimiter: None | str = ..., - converters: None | Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] = ..., - skiprows: int = ..., - usecols: int | Sequence[int] | None = ..., - unpack: bool = ..., - ndmin: L[0, 1, 2] = ..., - encoding: None | str = ..., - max_rows: None | int = ..., + fname: _FName, + dtype: None = None, + comments: str | Sequence[str] | None = "#", + delimiter: str | None = None, + converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, + skiprows: int = 0, + usecols: int | Sequence[int] | None = None, + unpack: bool = False, + ndmin: L[0, 1, 2] = 0, + encoding: str | None = None, + max_rows: int | None = None, *, - quotechar: None | str = ..., - like: None | _SupportsArrayFunc = ... -) -> NDArray[float64]: ... + quotechar: str | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[np.float64]: ... 
@overload def loadtxt( - fname: StrPath | Iterable[str] | Iterable[bytes], + fname: _FName, dtype: _DTypeLike[_SCT], - comments: None | str | Sequence[str] = ..., - delimiter: None | str = ..., - converters: None | Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] = ..., - skiprows: int = ..., - usecols: int | Sequence[int] | None = ..., - unpack: bool = ..., - ndmin: L[0, 1, 2] = ..., - encoding: None | str = ..., - max_rows: None | int = ..., + comments: str | Sequence[str] | None = "#", + delimiter: str | None = None, + converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, + skiprows: int = 0, + usecols: int | Sequence[int] | None = None, + unpack: bool = False, + ndmin: L[0, 1, 2] = 0, + encoding: str | None = None, + max_rows: int | None = None, *, - quotechar: None | str = ..., - like: None | _SupportsArrayFunc = ... + quotechar: str | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[_SCT]: ... @overload def loadtxt( - fname: StrPath | Iterable[str] | Iterable[bytes], + fname: _FName, dtype: DTypeLike, - comments: None | str | Sequence[str] = ..., - delimiter: None | str = ..., - converters: None | Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] = ..., - skiprows: int = ..., - usecols: int | Sequence[int] | None = ..., - unpack: bool = ..., - ndmin: L[0, 1, 2] = ..., - encoding: None | str = ..., - max_rows: None | int = ..., + comments: str | Sequence[str] | None = "#", + delimiter: str | None = None, + converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, + skiprows: int = 0, + usecols: int | Sequence[int] | None = None, + unpack: bool = False, + ndmin: L[0, 1, 2] = 0, + encoding: str | None = None, + max_rows: int | None = None, *, - quotechar: None | str = ..., - like: None | _SupportsArrayFunc = ... + quotechar: str | None = None, + like: _SupportsArrayFunc | None = None, ) -> NDArray[Any]: ... 
def savetxt( - fname: StrPath | SupportsWrite[str] | SupportsWrite[bytes], + fname: StrPath | _FNameWrite, X: ArrayLike, - fmt: str | Sequence[str] = ..., - delimiter: str = ..., - newline: str = ..., - header: str = ..., - footer: str = ..., - comments: str = ..., - encoding: None | str = ..., + fmt: str | Sequence[str] = "%.18e", + delimiter: str = " ", + newline: str = "\n", + header: str = "", + footer: str = "", + comments: str = "# ", + encoding: str | None = None, ) -> None: ... @overload def fromregex( - file: StrPath | SupportsRead[str] | SupportsRead[bytes], + file: _FNameRead, regexp: str | bytes | Pattern[Any], dtype: _DTypeLike[_SCT], - encoding: None | str = ... + encoding: str | None = None, ) -> NDArray[_SCT]: ... @overload def fromregex( - file: StrPath | SupportsRead[str] | SupportsRead[bytes], + file: _FNameRead, regexp: str | bytes | Pattern[Any], dtype: DTypeLike, - encoding: None | str = ... + encoding: str | None = None, ) -> NDArray[Any]: ... @overload def genfromtxt( - fname: StrPath | Iterable[str] | Iterable[bytes], - dtype: None = ..., + fname: _FName, + dtype: None = None, comments: str = ..., - delimiter: None | str | int | Iterable[int] = ..., + delimiter: str | int | Iterable[int] | None = ..., skip_header: int = ..., skip_footer: int = ..., - converters: None | Mapping[int | str, Callable[[str], Any]] = ..., + converters: Mapping[int | str, Callable[[str], Any]] | None = ..., missing_values: Any = ..., filling_values: Any = ..., - usecols: None | Sequence[int] = ..., - names: L[None, True] | str | Collection[str] = ..., - excludelist: None | Sequence[str] = ..., + usecols: Sequence[int] | None = ..., + names: L[True] | str | Collection[str] | None = ..., + excludelist: Sequence[str] | None = ..., deletechars: str = ..., replace_space: str = ..., autostrip: bool = ..., - case_sensitive: bool | L['upper', 'lower'] = ..., + case_sensitive: bool | L["upper", "lower"] = ..., defaultfmt: str = ..., - unpack: None | bool = ..., + unpack: 
bool | None = ..., usemask: bool = ..., loose: bool = ..., invalid_raise: bool = ..., - max_rows: None | int = ..., + max_rows: int | None = ..., encoding: str = ..., *, ndmin: L[0, 1, 2] = ..., - like: None | _SupportsArrayFunc = ..., + like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... @overload def genfromtxt( - fname: StrPath | Iterable[str] | Iterable[bytes], + fname: _FName, dtype: _DTypeLike[_SCT], comments: str = ..., - delimiter: None | str | int | Iterable[int] = ..., + delimiter: str | int | Iterable[int] | None = ..., skip_header: int = ..., skip_footer: int = ..., - converters: None | Mapping[int | str, Callable[[str], Any]] = ..., + converters: Mapping[int | str, Callable[[str], Any]] | None = ..., missing_values: Any = ..., filling_values: Any = ..., - usecols: None | Sequence[int] = ..., - names: L[None, True] | str | Collection[str] = ..., - excludelist: None | Sequence[str] = ..., + usecols: Sequence[int] | None = ..., + names: L[True] | str | Collection[str] | None = ..., + excludelist: Sequence[str] | None = ..., deletechars: str = ..., replace_space: str = ..., autostrip: bool = ..., - case_sensitive: bool | L['upper', 'lower'] = ..., + case_sensitive: bool | L["upper", "lower"] = ..., defaultfmt: str = ..., - unpack: None | bool = ..., + unpack: bool | None = ..., usemask: bool = ..., loose: bool = ..., invalid_raise: bool = ..., - max_rows: None | int = ..., + max_rows: int | None = ..., encoding: str = ..., *, ndmin: L[0, 1, 2] = ..., - like: None | _SupportsArrayFunc = ..., + like: _SupportsArrayFunc | None = ..., ) -> NDArray[_SCT]: ... 
@overload def genfromtxt( - fname: StrPath | Iterable[str] | Iterable[bytes], + fname: _FName, dtype: DTypeLike, comments: str = ..., - delimiter: None | str | int | Iterable[int] = ..., + delimiter: str | int | Iterable[int] | None = ..., skip_header: int = ..., skip_footer: int = ..., - converters: None | Mapping[int | str, Callable[[str], Any]] = ..., + converters: Mapping[int | str, Callable[[str], Any]] | None = ..., missing_values: Any = ..., filling_values: Any = ..., - usecols: None | Sequence[int] = ..., - names: L[None, True] | str | Collection[str] = ..., - excludelist: None | Sequence[str] = ..., + usecols: Sequence[int] | None = ..., + names: L[True] | str | Collection[str] | None = ..., + excludelist: Sequence[str] | None = ..., deletechars: str = ..., replace_space: str = ..., autostrip: bool = ..., - case_sensitive: bool | L['upper', 'lower'] = ..., + case_sensitive: bool | L["upper", "lower"] = ..., defaultfmt: str = ..., - unpack: None | bool = ..., + unpack: bool | None = ..., usemask: bool = ..., loose: bool = ..., invalid_raise: bool = ..., - max_rows: None | int = ..., + max_rows: int | None = ..., encoding: str = ..., *, ndmin: L[0, 1, 2] = ..., - like: None | _SupportsArrayFunc = ..., + like: _SupportsArrayFunc | None = ..., ) -> NDArray[Any]: ... @overload -def recfromtxt( - fname: StrPath | Iterable[str] | Iterable[bytes], - *, - usemask: L[False] = ..., - **kwargs: Any, -) -> recarray[Any, dtype[record]]: ... +def recfromtxt(fname: _FName, *, usemask: L[False] = False, **kwargs: object) -> np.recarray[Any, np.dtype[np.record]]: ... @overload -def recfromtxt( - fname: StrPath | Iterable[str] | Iterable[bytes], - *, - usemask: L[True], - **kwargs: Any, -) -> MaskedRecords[Any, dtype[void]]: ... +def recfromtxt(fname: _FName, *, usemask: L[True], **kwargs: object) -> MaskedRecords[Any, np.dtype[np.void]]: ... 
@overload -def recfromcsv( - fname: StrPath | Iterable[str] | Iterable[bytes], - *, - usemask: L[False] = ..., - **kwargs: Any, -) -> recarray[Any, dtype[record]]: ... +def recfromcsv(fname: _FName, *, usemask: L[False] = False, **kwargs: object) -> np.recarray[Any, np.dtype[np.record]]: ... @overload -def recfromcsv( - fname: StrPath | Iterable[str] | Iterable[bytes], - *, - usemask: L[True], - **kwargs: Any, -) -> MaskedRecords[Any, dtype[void]]: ... +def recfromcsv(fname: _FName, *, usemask: L[True], **kwargs: object) -> MaskedRecords[Any, np.dtype[np.void]]: ... diff --git a/numpy/lib/_user_array_impl.pyi b/numpy/lib/_user_array_impl.pyi new file mode 100644 index 000000000000..d5dfb0573c71 --- /dev/null +++ b/numpy/lib/_user_array_impl.pyi @@ -0,0 +1,220 @@ +from types import EllipsisType +from typing import Any, Generic, SupportsIndex, TypeAlias, TypeVar, overload + +from _typeshed import Incomplete +from typing_extensions import Self, deprecated, override + +import numpy as np +import numpy.typing as npt +from numpy._typing import _ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co, _DTypeLike + +### + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) +_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=Any, covariant=True) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype[Any]) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype[Any], default=np.dtype[Any], covariant=True) + +_BoolArrayT = TypeVar("_BoolArrayT", bound=container[Any, np.dtype[np.bool]]) +_IntegralArrayT = TypeVar("_IntegralArrayT", bound=container[Any, np.dtype[np.bool | np.integer | np.object_]]) +_RealContainerT = TypeVar( + "_RealContainerT", + bound=container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]], +) +_NumericContainerT = TypeVar("_NumericContainerT", bound=container[Any, np.dtype[np.number | np.timedelta64 | np.object_]]) + +_ArrayInt_co: TypeAlias = npt.NDArray[np.integer | np.bool] + 
+_ToIndexSlice: TypeAlias = slice | EllipsisType | _ArrayInt_co | None +_ToIndexSlices: TypeAlias = _ToIndexSlice | tuple[_ToIndexSlice, ...] +_ToIndex: TypeAlias = SupportsIndex | _ToIndexSlice +_ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...] + +### + +class container(Generic[_ShapeT_co, _DTypeT_co]): + array: np.ndarray[_ShapeT_co, _DTypeT_co] + + @overload + def __init__( + self, + /, + data: container[_ShapeT_co, _DTypeT_co] | np.ndarray[_ShapeT_co, _DTypeT_co], + dtype: None = None, + copy: bool = True, + ) -> None: ... + @overload + def __init__( + self: container[Any, np.dtype[_ScalarT]], + /, + data: _ArrayLike[_ScalarT], + dtype: None = None, + copy: bool = True, + ) -> None: ... + @overload + def __init__( + self: container[Any, np.dtype[_ScalarT]], + /, + data: npt.ArrayLike, + dtype: _DTypeLike[_ScalarT], + copy: bool = True, + ) -> None: ... + @overload + def __init__(self, /, data: npt.ArrayLike, dtype: npt.DTypeLike | None = None, copy: bool = True) -> None: ... + + # + def __complex__(self, /) -> complex: ... + def __float__(self, /) -> float: ... + def __int__(self, /) -> int: ... + def __hex__(self, /) -> str: ... + def __oct__(self, /) -> str: ... + + # + @override + def __eq__(self, other: object, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + @override + def __ne__(self, other: object, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + + # + def __lt__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + def __le__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + def __gt__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + def __ge__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + + # + def __len__(self, /) -> int: ... 
+ + # keep in sync with np.ndarray + @overload + def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> container[_ShapeT_co, _DTypeT_co]: ... + @overload + def __getitem__(self, key: _ToIndexSlices, /) -> container[Any, _DTypeT_co]: ... + @overload + def __getitem__(self, key: _ToIndices, /) -> Any: ... + @overload + def __getitem__(self: container[Any, np.dtype[np.void]], key: list[str], /) -> container[_ShapeT_co, np.dtype[np.void]]: ... + @overload + def __getitem__(self: container[Any, np.dtype[np.void]], key: str, /) -> container[_ShapeT_co, np.dtype[Any]]: ... + + # keep in sync with np.ndarray + @overload + def __setitem__(self, index: _ToIndices, value: object, /) -> None: ... + @overload + def __setitem__(self: container[Any, np.dtype[np.void]], key: str | list[str], value: object, /) -> None: ... + + # keep in sync with np.ndarray + @overload + def __abs__(self: container[_ShapeT, np.dtype[np.complex64]], /) -> container[_ShapeT, np.dtype[np.float32]]: ... # type: ignore[overload-overlap] + @overload + def __abs__(self: container[_ShapeT, np.dtype[np.complex128]], /) -> container[_ShapeT, np.dtype[np.float64]]: ... + @overload + def __abs__(self: container[_ShapeT, np.dtype[np.complex192]], /) -> container[_ShapeT, np.dtype[np.float96]]: ... + @overload + def __abs__(self: container[_ShapeT, np.dtype[np.complex256]], /) -> container[_ShapeT, np.dtype[np.float128]]: ... + @overload + def __abs__(self: _RealContainerT, /) -> _RealContainerT: ... + + # + def __neg__(self: _NumericContainerT, /) -> _NumericContainerT: ... # noqa: PYI019 + def __pos__(self: _NumericContainerT, /) -> _NumericContainerT: ... # noqa: PYI019 + def __invert__(self: _IntegralArrayT, /) -> _IntegralArrayT: ... # noqa: PYI019 + + # TODO(jorenham): complete these binary ops + + # + def __add__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __radd__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __iadd__(self, other: npt.ArrayLike, /) -> Self: ... 
+ + # + def __sub__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rsub__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __isub__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __mul__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rmul__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __imul__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __div__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rdiv__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __idiv__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __mod__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rmod__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __imod__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __divmod__(self, other: npt.ArrayLike, /) -> tuple[Incomplete, Incomplete]: ... + def __rdivmod__(self, other: npt.ArrayLike, /) -> tuple[Incomplete, Incomplete]: ... + + # + def __pow__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rpow__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __ipow__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __lshift__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.integer]]: ... + def __rlshift__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.integer]]: ... + def __ilshift__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + def __rshift__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.integer]]: ... + def __rrshift__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.integer]]: ... + def __irshift__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __and__(self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, /) -> container[Any, np.dtype[np.bool]]: ... + @overload + def __and__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.bool | np.integer]]: ... 
+ __rand__ = __and__ + @overload + def __iand__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __iand__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __xor__(self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, /) -> container[Any, np.dtype[np.bool]]: ... + @overload + def __xor__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.bool | np.integer]]: ... + __rxor__ = __xor__ + @overload + def __ixor__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __ixor__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __or__(self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, /) -> container[Any, np.dtype[np.bool]]: ... + @overload + def __or__(self, other: _ArrayLikeInt_co, /) -> container[Any, np.dtype[np.bool | np.integer]]: ... + __ror__ = __or__ + @overload + def __ior__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __ior__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __array__(self, /, t: None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __array__(self, /, t: _DTypeT) -> np.ndarray[_ShapeT_co, _DTypeT]: ... + + # + @overload + def __array_wrap__(self, arg0: npt.ArrayLike, /) -> container[_ShapeT_co, _DTypeT_co]: ... + @overload + def __array_wrap__(self, a: np.ndarray[_ShapeT, _DTypeT], c: Any = ..., s: Any = ..., /) -> container[_ShapeT, _DTypeT]: ... + + # + def copy(self, /) -> Self: ... + @deprecated("tostring() is deprecated. Use tobytes() instead.") + def tostring(self, /) -> bytes: ... + def tobytes(self, /) -> bytes: ... + def byteswap(self, /) -> Self: ... + def astype(self, /, typecode: _DTypeLike[_ScalarT]) -> container[_ShapeT_co, np.dtype[_ScalarT]]: ... 
diff --git a/numpy/lib/introspect.pyi b/numpy/lib/introspect.pyi new file mode 100644 index 000000000000..7929981cd636 --- /dev/null +++ b/numpy/lib/introspect.pyi @@ -0,0 +1,3 @@ +__all__ = ["opt_func_info"] + +def opt_func_info(func_name: str | None = None, signature: str | None = None) -> dict[str, dict[str, dict[str, str]]]: ... diff --git a/numpy/lib/npyio.pyi b/numpy/lib/npyio.pyi index c3258e88d04f..fd3ae8f5a287 100644 --- a/numpy/lib/npyio.pyi +++ b/numpy/lib/npyio.pyi @@ -1,4 +1,5 @@ from numpy.lib._npyio_impl import ( DataSource as DataSource, NpzFile as NpzFile, + __doc__ as __doc__, ) diff --git a/numpy/lib/recfunctions.pyi b/numpy/lib/recfunctions.pyi new file mode 100644 index 000000000000..442530e9cd39 --- /dev/null +++ b/numpy/lib/recfunctions.pyi @@ -0,0 +1,435 @@ +from collections.abc import Callable, Iterable, Mapping, Sequence +from typing import Any, Literal, TypeAlias, overload + +from _typeshed import Incomplete +from typing_extensions import TypeVar + +import numpy as np +import numpy.typing as npt +from numpy._typing import _DTypeLike, _DTypeLikeVoid +from numpy.ma.mrecords import MaskedRecords + +__all__ = [ + "append_fields", + "apply_along_fields", + "assign_fields_by_name", + "drop_fields", + "find_duplicates", + "flatten_descr", + "get_fieldstructure", + "get_names", + "get_names_flat", + "join_by", + "merge_arrays", + "rec_append_fields", + "rec_drop_fields", + "rec_join", + "recursive_fill_fields", + "rename_fields", + "repack_fields", + "require_fields", + "stack_arrays", + "structured_to_unstructured", + "unstructured_to_structured", +] + +_T = TypeVar("_T") +_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype[Any]) +_ArrayT = TypeVar("_ArrayT", bound=npt.NDArray[Any]) +_VoidArrayT = TypeVar("_VoidArrayT", bound=npt.NDArray[np.void]) +_NonVoidDTypeT = TypeVar("_NonVoidDTypeT", bound=_NonVoidDType) + +_OneOrMany: TypeAlias = _T | 
Iterable[_T] +_BuiltinSequence: TypeAlias = tuple[_T, ...] | list[_T] + +_NestedNames: TypeAlias = tuple[str | _NestedNames, ...] +_NonVoid: TypeAlias = np.bool | np.number | np.character | np.datetime64 | np.timedelta64 | np.object_ +_NonVoidDType: TypeAlias = np.dtype[_NonVoid] | np.dtypes.StringDType + +_JoinType: TypeAlias = Literal["inner", "outer", "leftouter"] + +### + +def recursive_fill_fields(input: npt.NDArray[np.void], output: _VoidArrayT) -> _VoidArrayT: ... + +# +def get_names(adtype: np.dtype[np.void]) -> _NestedNames: ... +def get_names_flat(adtype: np.dtype[np.void]) -> tuple[str, ...]: ... + +# +@overload +def flatten_descr(ndtype: _NonVoidDTypeT) -> tuple[tuple[Literal[""], _NonVoidDTypeT]]: ... +@overload +def flatten_descr(ndtype: np.dtype[np.void]) -> tuple[tuple[str, np.dtype[Any]]]: ... + +# +def get_fieldstructure( + adtype: np.dtype[np.void], + lastname: str | None = None, + parents: dict[str, list[str]] | None = None, +) -> dict[str, list[str]]: ... + +# +@overload +def merge_arrays( + seqarrays: Sequence[np.ndarray[_ShapeT, np.dtype[Any]]] | np.ndarray[_ShapeT, np.dtype[Any]], + fill_value: float = -1, + flatten: bool = False, + usemask: bool = False, + asrecarray: bool = False, +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def merge_arrays( + seqarrays: Sequence[npt.ArrayLike] | np.void, + fill_value: float = -1, + flatten: bool = False, + usemask: bool = False, + asrecarray: bool = False, +) -> np.recarray[Any, np.dtype[np.void]]: ... + +# +@overload +def drop_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], + usemask: bool = True, + asrecarray: Literal[False] = False, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def drop_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], + usemask: bool, + asrecarray: Literal[True], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... 
+@overload +def drop_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], + usemask: bool = True, + *, + asrecarray: Literal[True], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... + +# +@overload +def rename_fields( + base: MaskedRecords[_ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... +@overload +def rename_fields( + base: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +@overload +def rename_fields( + base: np.recarray[_ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def rename_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... + +# +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype[Any]] | None, + fill_value: int, + usemask: Literal[False], + asrecarray: Literal[False] = False, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype[Any]] | None = None, + fill_value: int = -1, + *, + usemask: Literal[False], + asrecarray: Literal[False] = False, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype[Any]] | None, + fill_value: int, + usemask: Literal[False], + asrecarray: Literal[True], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... 
+@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype[Any]] | None = None, + fill_value: int = -1, + *, + usemask: Literal[False], + asrecarray: Literal[True], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype[Any]] | None = None, + fill_value: int = -1, + usemask: Literal[True] = True, + asrecarray: Literal[False] = False, +) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype[Any]] | None, + fill_value: int, + usemask: Literal[True], + asrecarray: Literal[True], +) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype[Any]] | None = None, + fill_value: int = -1, + usemask: Literal[True] = True, + *, + asrecarray: Literal[True], +) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... + +# +def rec_drop_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... + +# +def rec_append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype[Any]] | None = None, +) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... + +# TODO(jorenham): Stop passing `void` directly once structured dtypes are implemented, +# e.g. using a `TypeVar` with constraints. 
+# https://github.com/numpy/numtype/issues/92 +@overload +def repack_fields(a: _DTypeT, align: bool = False, recurse: bool = False) -> _DTypeT: ... +@overload +def repack_fields(a: _ScalarT, align: bool = False, recurse: bool = False) -> _ScalarT: ... +@overload +def repack_fields(a: _ArrayT, align: bool = False, recurse: bool = False) -> _ArrayT: ... + +# TODO(jorenham): Attempt shape-typing (return type has ndim == arr.ndim + 1) +@overload +def structured_to_unstructured( + arr: npt.NDArray[np.void], + dtype: _DTypeLike[_ScalarT], + copy: bool = False, + casting: np._CastingKind = "unsafe", +) -> npt.NDArray[_ScalarT]: ... +@overload +def structured_to_unstructured( + arr: npt.NDArray[np.void], + dtype: npt.DTypeLike | None = None, + copy: bool = False, + casting: np._CastingKind = "unsafe", +) -> npt.NDArray[Any]: ... + +# +@overload +def unstructured_to_structured( + arr: npt.NDArray[Any], + dtype: npt.DTypeLike, + names: None = None, + align: bool = False, + copy: bool = False, + casting: str = "unsafe", +) -> npt.NDArray[np.void]: ... +@overload +def unstructured_to_structured( + arr: npt.NDArray[Any], + dtype: None, + names: _OneOrMany[str], + align: bool = False, + copy: bool = False, + casting: str = "unsafe", +) -> npt.NDArray[np.void]: ... + +# +def apply_along_fields( + func: Callable[[np.ndarray[_ShapeT, Any]], npt.NDArray[Any]], + arr: np.ndarray[_ShapeT, np.dtype[np.void]], +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... + +# +def assign_fields_by_name(dst: npt.NDArray[np.void], src: npt.NDArray[np.void], zero_unassigned: bool = True) -> None: ... + +# +def require_fields( + array: np.ndarray[_ShapeT, np.dtype[np.void]], + required_dtype: _DTypeLikeVoid, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... + +# TODO(jorenham): Attempt shape-typing +@overload +def stack_arrays( + arrays: _ArrayT, + defaults: Mapping[str, object] | None = None, + usemask: bool = True, + asrecarray: bool = False, + autoconvert: bool = False, +) -> _ArrayT: ... 
+@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None, + usemask: Literal[False], + asrecarray: Literal[False] = False, + autoconvert: bool = False, +) -> npt.NDArray[np.void]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[False] = False, + autoconvert: bool = False, +) -> npt.NDArray[np.void]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[True], + autoconvert: bool = False, +) -> np.recarray[tuple[int, ...], np.dtype[np.void]]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + usemask: Literal[True] = True, + asrecarray: Literal[False] = False, + autoconvert: bool = False, +) -> np.ma.MaskedArray[tuple[int, ...], np.dtype[np.void]]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None, + usemask: Literal[True], + asrecarray: Literal[True], + autoconvert: bool = False, +) -> MaskedRecords[tuple[int, ...], np.dtype[np.void]]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + usemask: Literal[True] = True, + *, + asrecarray: Literal[True], + autoconvert: bool = False, +) -> MaskedRecords[tuple[int, ...], np.dtype[np.void]]: ... + +# +@overload +def find_duplicates( + a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], + key: str | None = None, + ignoremask: bool = True, + return_index: Literal[False] = False, +) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... 
+@overload +def find_duplicates( + a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], + key: str | None, + ignoremask: bool, + return_index: Literal[True], +) -> tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ... +@overload +def find_duplicates( + a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], + key: str | None = None, + ignoremask: bool = True, + *, + return_index: Literal[True], +) -> tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ... + +# +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[False] = False, +) -> np.ndarray[tuple[int], np.dtype[np.void]]: ... +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[True], +) -> np.recarray[tuple[int], np.dtype[np.void]]: ... +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + usemask: Literal[True] = True, + asrecarray: Literal[False] = False, +) -> np.ma.MaskedArray[tuple[int], np.dtype[np.void]]: ... +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + usemask: Literal[True] = True, + *, + asrecarray: Literal[True], +) -> MaskedRecords[tuple[int], np.dtype[np.void]]: ... 
+ +# +def rec_join( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, +) -> np.recarray[tuple[int], np.dtype[np.void]]: ... diff --git a/numpy/lib/user_array.pyi b/numpy/lib/user_array.pyi new file mode 100644 index 000000000000..9b90d893326b --- /dev/null +++ b/numpy/lib/user_array.pyi @@ -0,0 +1 @@ +from ._user_array_impl import container as container diff --git a/numpy/meson.build b/numpy/meson.build index 3faebf17d90e..6fef05b9113f 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -276,11 +276,15 @@ python_sources = [ '_array_api_info.py', '_array_api_info.pyi', '_configtool.py', + '_configtool.pyi', '_distributor_init.py', + '_distributor_init.pyi', '_globals.py', + '_globals.pyi', '_pytesttester.py', '_pytesttester.pyi', '_expired_attrs_2_0.py', + '_expired_attrs_2_0.pyi', 'conftest.py', 'ctypeslib.py', 'ctypeslib.pyi', diff --git a/numpy/typing/tests/data/fail/arrayprint.pyi b/numpy/typing/tests/data/fail/arrayprint.pyi index f8c8a3237816..486c11e79868 100644 --- a/numpy/typing/tests/data/fail/arrayprint.pyi +++ b/numpy/typing/tests/data/fail/arrayprint.pyi @@ -6,11 +6,11 @@ import numpy.typing as npt AR: npt.NDArray[np.float64] func1: Callable[[Any], str] -func2: Callable[[np.integer[Any]], str] +func2: Callable[[np.integer], str] -np.array2string(AR, style=None) # E: Unexpected keyword argument -np.array2string(AR, legacy="1.14") # E: incompatible type -np.array2string(AR, sign="*") # E: incompatible type -np.array2string(AR, floatmode="default") # E: incompatible type -np.array2string(AR, formatter={"A": func1}) # E: incompatible type -np.array2string(AR, formatter={"float": func2}) # E: Incompatible types +np.array2string(AR, style=None) # E: No overload variant +np.array2string(AR, legacy="1.14") # E: No overload variant +np.array2string(AR, sign="*") # E: No overload variant 
+np.array2string(AR, floatmode="default") # E: No overload variant +np.array2string(AR, formatter={"A": func1}) # E: No overload variant +np.array2string(AR, formatter={"float": func2}) # E: No overload variant diff --git a/numpy/typing/tests/data/pass/lib_user_array.py b/numpy/typing/tests/data/pass/lib_user_array.py new file mode 100644 index 000000000000..62b7e85d7ff1 --- /dev/null +++ b/numpy/typing/tests/data/pass/lib_user_array.py @@ -0,0 +1,22 @@ +"""Based on the `if __name__ == "__main__"` test code in `lib/_user_array_impl.py`.""" + +from __future__ import annotations + +import numpy as np +from numpy.lib.user_array import container + +N = 10_000 +W = H = int(N**0.5) + +a: np.ndarray[tuple[int, int], np.dtype[np.int32]] +ua: container[tuple[int, int], np.dtype[np.int32]] + +a = np.arange(N, dtype=np.int32).reshape(W, H) +ua = container(a) + +ua_small: container[tuple[int, int], np.dtype[np.int32]] = ua[:3, :5] +ua_small[0, 0] = 10 + +ua_bool: container[tuple[int, int], np.dtype[np.bool]] = ua_small > 1 + +# shape: tuple[int, int] = np.shape(ua) diff --git a/numpy/typing/tests/data/pass/recfunctions.py b/numpy/typing/tests/data/pass/recfunctions.py new file mode 100644 index 000000000000..03322e064be4 --- /dev/null +++ b/numpy/typing/tests/data/pass/recfunctions.py @@ -0,0 +1,162 @@ +"""These tests are based on the doctests from `numpy/lib/recfunctions.py`.""" + +from typing import Any +from typing_extensions import assert_type + +import numpy as np +import numpy.typing as npt +from numpy.lib import recfunctions as rfn + + +def test_recursive_fill_fields() -> None: + a: npt.NDArray[np.void] = np.array( + [(1, 10.0), (2, 20.0)], + dtype=[("A", np.int64), ("B", np.float64)], + ) + b = np.zeros((int(3),), dtype=a.dtype) + out = rfn.recursive_fill_fields(a, b) + assert_type(out, np.ndarray[tuple[int], np.dtype[np.void]]) + + +def test_get_names() -> None: + names: tuple[str | Any, ...] 
+ names = rfn.get_names(np.empty((1,), dtype=[("A", int)]).dtype) + names = rfn.get_names(np.empty((1,), dtype=[("A", int), ("B", float)]).dtype) + + adtype = np.dtype([("a", int), ("b", [("b_a", int), ("b_b", int)])]) + names = rfn.get_names(adtype) + + +def test_get_names_flat() -> None: + names: tuple[str, ...] + names = rfn.get_names_flat(np.empty((1,), dtype=[("A", int)]).dtype) + names = rfn.get_names_flat(np.empty((1,), dtype=[("A", int), ("B", float)]).dtype) + + adtype = np.dtype([("a", int), ("b", [("b_a", int), ("b_b", int)])]) + names = rfn.get_names_flat(adtype) + + +def test_flatten_descr() -> None: + ndtype = np.dtype([("a", " None: + ndtype = np.dtype([ + ("A", int), + ("B", [("B_A", int), ("B_B", [("B_B_A", int), ("B_B_B", int)])]), + ]) + assert_type(rfn.get_fieldstructure(ndtype), dict[str, list[str]]) + + +def test_merge_arrays() -> None: + assert_type( + rfn.merge_arrays(( + np.ones((int(2),), np.int_), + np.ones((int(3),), np.float64), + )), + np.recarray[tuple[int], np.dtype[np.void]], + ) + + +def test_drop_fields() -> None: + ndtype = [("a", np.int64), ("b", [("b_a", np.double), ("b_b", np.int64)])] + a = np.ones((int(3),), dtype=ndtype) + + assert_type( + rfn.drop_fields(a, "a"), + np.ndarray[tuple[int], np.dtype[np.void]], + ) + assert_type( + rfn.drop_fields(a, "a", asrecarray=True), + np.rec.recarray[tuple[int], np.dtype[np.void]], + ) + assert_type( + rfn.rec_drop_fields(a, "a"), + np.rec.recarray[tuple[int], np.dtype[np.void]], + ) + + +def test_rename_fields() -> None: + ndtype = [("a", np.int64), ("b", [("b_a", np.double), ("b_b", np.int64)])] + a = np.ones((int(3),), dtype=ndtype) + + assert_type( + rfn.rename_fields(a, {"a": "A", "b_b": "B_B"}), + np.ndarray[tuple[int], np.dtype[np.void]], + ) + + +def test_repack_fields() -> None: + dt: np.dtype[np.void] = np.dtype("u1, None: + a = np.zeros(4, dtype=[("a", "i4"), ("b", "f4,u2"), ("c", "f4", 2)]) + assert_type(rfn.structured_to_unstructured(a), npt.NDArray[Any]) + + +def 
unstructured_to_structured() -> None: + dt: np.dtype[np.void] = np.dtype([("a", "i4"), ("b", "f4,u2"), ("c", "f4", 2)]) + a = np.arange(20, dtype=np.int32).reshape((4, 5)) + assert_type(rfn.unstructured_to_structured(a, dt), npt.NDArray[np.void]) + + +def test_apply_along_fields() -> None: + b = np.ones(4, dtype=[("x", "i4"), ("y", "f4"), ("z", "f8")]) + assert_type( + rfn.apply_along_fields(np.mean, b), + np.ndarray[tuple[int], np.dtype[np.void]], + ) + + +def test_assign_fields_by_name() -> None: + b = np.ones(4, dtype=[("x", "i4"), ("y", "f4"), ("z", "f8")]) + assert_type( + rfn.apply_along_fields(np.mean, b), + np.ndarray[tuple[int], np.dtype[np.void]], + ) + + +def test_require_fields() -> None: + a = np.ones(4, dtype=[("a", "i4"), ("b", "f8"), ("c", "u1")]) + assert_type( + rfn.require_fields(a, [("b", "f4"), ("c", "u1")]), + np.ndarray[tuple[int], np.dtype[np.void]], + ) + + +def test_stack_arrays() -> None: + x = np.zeros((int(2),), np.int32) + assert_type( + rfn.stack_arrays(x), + np.ndarray[tuple[int], np.dtype[np.int32]], + ) + + z = np.ones((int(2),), [("A", "|S3"), ("B", float)]) + zz = np.ones((int(2),), [("A", "|S3"), ("B", np.float64), ("C", np.float64)]) + assert_type( + rfn.stack_arrays((z, zz)), + np.ma.MaskedArray[tuple[int, ...], np.dtype[np.void]], + ) + + +def test_find_duplicates() -> None: + ndtype = np.dtype([("a", int)]) + + a = np.ma.ones(7, mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) + assert_type(rfn.find_duplicates(a), np.ma.MaskedArray[Any, np.dtype[np.void]]) + assert_type( + rfn.find_duplicates(a, ignoremask=True, return_index=True), + tuple[ + np.ma.MaskedArray[Any, np.dtype[np.void]], + np.ndarray[Any, np.dtype[np.int_]], + ], + ) From de08196c01fe01de031e4ec22fe77335f663f71d Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 6 Feb 2025 11:52:17 -0700 Subject: [PATCH 085/187] BUG: fix race initializing legacy dtype casts --- numpy/_core/src/multiarray/convert_datatype.c | 144 ++++++++++++------ 
numpy/_core/tests/test_multithreading.py | 96 ++++++++++++ 2 files changed, 190 insertions(+), 50 deletions(-) diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 1dff38a1d1ef..1847e18d4679 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -62,46 +62,24 @@ static PyObject * PyArray_GetObjectToGenericCastingImpl(void); -/** - * Fetch the casting implementation from one DType to another. - * - * @param from The implementation to cast from - * @param to The implementation to cast to - * - * @returns A castingimpl (PyArrayDTypeMethod *), None or NULL with an - * error set. - */ -NPY_NO_EXPORT PyObject * -PyArray_GetCastingImpl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) +static PyObject * +create_casting_impl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) { - PyObject *res; - if (from == to) { - res = (PyObject *)NPY_DT_SLOTS(from)->within_dtype_castingimpl; - } - else { - res = PyDict_GetItemWithError(NPY_DT_SLOTS(from)->castingimpls, (PyObject *)to); - } - if (res != NULL || PyErr_Occurred()) { - Py_XINCREF(res); - return res; - } /* - * The following code looks up CastingImpl based on the fact that anything + * Look up CastingImpl based on the fact that anything * can be cast to and from objects or structured (void) dtypes. 
- * - * The last part adds casts dynamically based on legacy definition */ if (from->type_num == NPY_OBJECT) { - res = PyArray_GetObjectToGenericCastingImpl(); + return PyArray_GetObjectToGenericCastingImpl(); } else if (to->type_num == NPY_OBJECT) { - res = PyArray_GetGenericToObjectCastingImpl(); + return PyArray_GetGenericToObjectCastingImpl(); } else if (from->type_num == NPY_VOID) { - res = PyArray_GetVoidToGenericCastingImpl(); + return PyArray_GetVoidToGenericCastingImpl(); } else if (to->type_num == NPY_VOID) { - res = PyArray_GetGenericToVoidCastingImpl(); + return PyArray_GetGenericToVoidCastingImpl(); } /* * Reject non-legacy dtypes. They need to use the new API to add casts and @@ -124,43 +102,104 @@ PyArray_GetCastingImpl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) PyArray_VectorUnaryFunc *castfunc = PyArray_GetCastFunc( from->singleton, to->type_num); if (castfunc == NULL) { - PyErr_Clear(); - /* Remember that this cast is not possible */ - if (PyDict_SetItem(NPY_DT_SLOTS(from)->castingimpls, - (PyObject *) to, Py_None) < 0) { - return NULL; - } - Py_RETURN_NONE; + PyErr_Clear(); + Py_RETURN_NONE; } } - - /* PyArray_AddLegacyWrapping_CastingImpl find the correct casting level: */ - /* - * TODO: Possibly move this to the cast registration time. But if we do - * that, we have to also update the cast when the casting safety - * is registered. + /* Create a cast using the state of the legacy casting setup defined + * during the setup of the DType. + * + * Ideally we would do this when we create the DType, but legacy user + * DTypes don't have a way to signal that a DType is done setting up + * casts. Without such a mechanism, the safest way to know that a + * DType is done setting up is to register the cast lazily the first + * time a user does the cast. 
+ * + * We *could* register the casts when we create the wrapping + * DTypeMeta, but that means the internals of the legacy user DType + * system would need to update the state of the casting safety flags + * in the cast implementations stored on the DTypeMeta. That's an + * inversion of abstractions and would be tricky to do without + * creating circular dependencies inside NumPy. */ if (PyArray_AddLegacyWrapping_CastingImpl(from, to, -1) < 0) { return NULL; } + /* castingimpls is unconditionally filled by + * AddLegacyWrapping_CastingImpl, so this won't create a recursive + * critical section + */ return PyArray_GetCastingImpl(from, to); } +} - if (res == NULL) { +static PyObject * +ensure_castingimpl_exists(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) +{ + int return_error = 0; + PyObject *res = NULL; + + /* Need to create the cast. This might happen at runtime so we enter a + critical section to avoid races */ + + Py_BEGIN_CRITICAL_SECTION(NPY_DT_SLOTS(from)->castingimpls); + + /* check if another thread filled it while this thread was blocked on + acquiring the critical section */ + if (PyDict_GetItemRef(NPY_DT_SLOTS(from)->castingimpls, (PyObject *)to, + &res) < 0) { + return_error = 1; + } + + if (!return_error) { + res = create_casting_impl(from, to); + if (res == NULL) { + return_error = 1; + } + } + if (!return_error && + PyDict_SetItem(NPY_DT_SLOTS(from)->castingimpls, + (PyObject *)to, res) < 0) { + return_error = 1; + } + Py_END_CRITICAL_SECTION(); + if (return_error) { + Py_XDECREF(res); return NULL; } - if (from == to) { + if (from == to && res == Py_None) { PyErr_Format(PyExc_RuntimeError, "Internal NumPy error, within-DType cast missing for %S!", from); - Py_DECREF(res); return NULL; } - if (PyDict_SetItem(NPY_DT_SLOTS(from)->castingimpls, - (PyObject *)to, res) < 0) { - Py_DECREF(res); + return res; +} + +/** + * Fetch the casting implementation from one DType to another. 
+ * + * @param from The implementation to cast from + * @param to The implementation to cast to + * + * @returns A castingimpl (PyArrayDTypeMethod *), None or NULL with an + * error set. + */ +NPY_NO_EXPORT PyObject * +PyArray_GetCastingImpl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) +{ + PyObject *res; + if (from == to) { + res = Py_XNewRef((PyObject *)NPY_DT_SLOTS(from)->within_dtype_castingimpl); + } + else if (PyDict_GetItemRef(NPY_DT_SLOTS(from)->castingimpls, + (PyObject *)to, &res) < 0) { return NULL; } - return res; + if (res != NULL) { + return res; + } + + return ensure_castingimpl_exists(from, to); } @@ -409,7 +448,7 @@ _get_cast_safety_from_castingimpl(PyArrayMethodObject *castingimpl, * implementations fully to have them available for doing the actual cast * later. * - * @param from The descriptor to cast from + * @param from The descriptor to cast from * @param to The descriptor to cast to (may be NULL) * @param to_dtype If `to` is NULL, must pass the to_dtype (otherwise this * is ignored). @@ -2031,6 +2070,11 @@ PyArray_AddCastingImplementation(PyBoundArrayMethodObject *meth) /** * Add a new casting implementation using a PyArrayMethod_Spec. * + * Using this function outside of module initialization without holding a + * critical section on the castingimpls dict may lead to a race to fill the + * dict. Use PyArray_GetGastingImpl to lazily register casts at runtime + * safely. + * * @param spec The specification to use as a source * @param private If private, allow slots not publicly exposed. 
* @return 0 on success -1 on failure diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 2ddca57dbd0b..e12edd0c15be 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -5,6 +5,7 @@ from numpy.testing import IS_WASM from numpy.testing._private.utils import run_threaded +from numpy._core import _rational_tests if IS_WASM: pytest.skip(allow_module_level=True, reason="no threading support in wasm") @@ -165,3 +166,98 @@ def closure(b): x = np.repeat(x0, 2, axis=0)[::2] run_threaded(closure, max_workers=10, pass_barrier=True) + + +def test_structured_advanced_indexing(): + # Test that copyswap(n) used by integer array indexing is threadsafe + # for structured datatypes, see gh-15387. This test can behave randomly. + + # Create a deeply nested dtype to make a failure more likely: + dt = np.dtype([("", "f8")]) + dt = np.dtype([("", dt)] * 2) + dt = np.dtype([("", dt)] * 2) + # The array should be large enough to likely run into threading issues + arr = np.random.uniform(size=(6000, 8)).view(dt)[:, 0] + + rng = np.random.default_rng() + + def func(arr): + indx = rng.integers(0, len(arr), size=6000, dtype=np.intp) + arr[indx] + + tpe = concurrent.futures.ThreadPoolExecutor(max_workers=8) + futures = [tpe.submit(func, arr) for _ in range(10)] + for f in futures: + f.result() + + assert arr.dtype is dt + + +def test_structured_threadsafety2(): + # Nonzero (and some other functions) should be threadsafe for + # structured datatypes, see gh-15387. This test can behave randomly. 
+ from concurrent.futures import ThreadPoolExecutor + + # Create a deeply nested dtype to make a failure more likely: + dt = np.dtype([("", "f8")]) + dt = np.dtype([("", dt)]) + dt = np.dtype([("", dt)] * 2) + # The array should be large enough to likely run into threading issues + arr = np.random.uniform(size=(5000, 4)).view(dt)[:, 0] + + def func(arr): + arr.nonzero() + + tpe = ThreadPoolExecutor(max_workers=8) + futures = [tpe.submit(func, arr) for _ in range(10)] + for f in futures: + f.result() + + assert arr.dtype is dt + + +def test_stringdtype_multithreaded_access_and_mutation( + dtype, random_string_list): + # this test uses an RNG and may crash or cause deadlocks if there is a + # threading bug + rng = np.random.default_rng(0x4D3D3D3) + + chars = list(string.ascii_letters + string.digits) + chars = np.array(chars, dtype="U1") + ret = rng.choice(chars, size=100 * 10, replace=True) + random_string_list = ret.view("U100") + + def func(arr): + rnd = rng.random() + # either write to random locations in the array, compute a ufunc, or + # re-initialize the array + if rnd < 0.25: + num = np.random.randint(0, arr.size) + arr[num] = arr[num] + "hello" + elif rnd < 0.5: + if rnd < 0.375: + np.add(arr, arr) + else: + np.add(arr, arr, out=arr) + elif rnd < 0.75: + if rnd < 0.875: + np.multiply(arr, np.int64(2)) + else: + np.multiply(arr, np.int64(2), out=arr) + else: + arr[:] = random_string_list + + with concurrent.futures.ThreadPoolExecutor(max_workers=8) as tpe: + arr = np.array(random_string_list, dtype=dtype) + futures = [tpe.submit(func, arr) for _ in range(500)] + + for f in futures: + f.result() + + +def test_legacy_usertype_cast_init_thread_safety(): + def closure(b): + b.wait() + np.full((10, 10), 1, _rational_tests.rational) + + run_threaded(closure, 1000, pass_barrier=True) From ebb0a850559f8201ca4171d2be2240878f621dbb Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 6 Feb 2025 14:38:55 -0700 Subject: [PATCH 086/187] CI: remove pytest-xdist from 
TSAN and ASAN CI python env --- .github/workflows/compiler_sanitizers.yml | 127 ++++++++++++++++++++++ 1 file changed, 127 insertions(+) create mode 100644 .github/workflows/compiler_sanitizers.yml diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml new file mode 100644 index 000000000000..9477e0be1bd1 --- /dev/null +++ b/.github/workflows/compiler_sanitizers.yml @@ -0,0 +1,127 @@ +name: Test with compiler sanitizers + +on: + push: + branches: + - main + pull_request: + branches: + - main + - maintenance/** + +defaults: + run: + shell: bash + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +permissions: + contents: read # to fetch code (actions/checkout) + +jobs: + clang_ASAN: + # To enable this workflow on a fork, comment out: + if: github.repository == 'numpy/numpy' + runs-on: macos-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + - name: Set up pyenv + run: | + git clone https://github.com/pyenv/pyenv.git "$HOME/.pyenv" + PYENV_ROOT="$HOME/.pyenv" + PYENV_BIN="$PYENV_ROOT/bin" + PYENV_SHIMS="$PYENV_ROOT/shims" + echo "$PYENV_BIN" >> $GITHUB_PATH + echo "$PYENV_SHIMS" >> $GITHUB_PATH + echo "PYENV_ROOT=$PYENV_ROOT" >> $GITHUB_ENV + - name: Check pyenv is working + run: + pyenv --version + - name: Set up LLVM + run: | + brew install llvm@19 + LLVM_PREFIX=$(brew --prefix llvm@19) + echo CC="$LLVM_PREFIX/bin/clang" >> $GITHUB_ENV + echo CXX="$LLVM_PREFIX/bin/clang++" >> $GITHUB_ENV + echo LDFLAGS="-L$LLVM_PREFIX/lib" >> $GITHUB_ENV + echo CPPFLAGS="-I$LLVM_PREFIX/include" >> $GITHUB_ENV + - name: Build Python with address sanitizer + run: | + CONFIGURE_OPTS="--with-address-sanitizer" pyenv install 3.13 + pyenv global 3.13 + - name: Install dependencies + run: | + pip install -r requirements/build_requirements.txt + pip install -r 
requirements/ci_requirements.txt + pip install -r requirements/test_requirements.txt + # xdist captures stdout/stderr, but we want the ASAN output + pip uninstall -y pytest-xdist + - name: Build + run: + python -m spin build -j2 -- -Db_sanitize=address + - name: Test + run: | + # pass -s to pytest to see ASAN errors and warnings, otherwise pytest captures them + ASAN_OPTIONS=detect_leaks=0:symbolize=1:strict_init_order=true:allocator_may_return_null=1:halt_on_error=1 \ + python -m spin test -- -v -s --timeout=600 --durations=10 + + clang_TSAN: + # To enable this workflow on a fork, comment out: + if: github.repository == 'numpy/numpy' + runs-on: macos-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + - name: Set up pyenv + run: | + git clone https://github.com/pyenv/pyenv.git "$HOME/.pyenv" + PYENV_ROOT="$HOME/.pyenv" + PYENV_BIN="$PYENV_ROOT/bin" + PYENV_SHIMS="$PYENV_ROOT/shims" + echo "$PYENV_BIN" >> $GITHUB_PATH + echo "$PYENV_SHIMS" >> $GITHUB_PATH + echo "PYENV_ROOT=$PYENV_ROOT" >> $GITHUB_ENV + - name: Check pyenv is working + run: + pyenv --version + - name: Set up LLVM + run: | + brew install llvm@19 + LLVM_PREFIX=$(brew --prefix llvm@19) + echo CC="$LLVM_PREFIX/bin/clang" >> $GITHUB_ENV + echo CXX="$LLVM_PREFIX/bin/clang++" >> $GITHUB_ENV + echo LDFLAGS="-L$LLVM_PREFIX/lib" >> $GITHUB_ENV + echo CPPFLAGS="-I$LLVM_PREFIX/include" >> $GITHUB_ENV + - name: Build Python with thread sanitizer support + run: | + # free-threaded Python is much more likely to trigger races + CONFIGURE_OPTS="--with-thread-sanitizer" pyenv install 3.13t + pyenv global 3.13t + - name: Install dependencies + run: | + # TODO: remove when a released cython supports free-threaded python + pip install -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple cython + pip install -r requirements/build_requirements.txt + pip install -r 
requirements/ci_requirements.txt + pip install -r requirements/test_requirements.txt + # xdist captures stdout/stderr, but we want the TSAN output + pip uninstall -y pytest-xdist + - name: Build + run: + python -m spin build -j2 -- -Db_sanitize=thread + - name: Test + run: | + # These tests are slow, so only run tests in files that do "import threading" to make them count + TSAN_OPTIONS=allocator_may_return_null=1:halt_on_error=1 \ + python -m spin test \ + `find numpy -name "test*.py" | xargs grep -l "import threading" | tr '\n' ' '` \ + -- -v -s --timeout=600 --durations=10 From 036d7d5cbb6987bb8fd400f78c0286dcb941f7c4 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 6 Feb 2025 14:39:41 -0700 Subject: [PATCH 087/187] BUG: fix logic error in ensure_castingimpl_exists --- numpy/_core/src/multiarray/convert_datatype.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 1847e18d4679..56796e386f75 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -151,7 +151,7 @@ ensure_castingimpl_exists(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) return_error = 1; } - if (!return_error) { + if (res == NULL && !return_error) { res = create_casting_impl(from, to); if (res == NULL) { return_error = 1; From 73698cf31b7d8665d76e2b427b2800d55f2383c3 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 6 Feb 2025 14:39:58 -0700 Subject: [PATCH 088/187] BUG: fix data race setting up within dtype cats for legacy user dtypes --- numpy/_core/src/multiarray/convert_datatype.c | 7 +++++-- numpy/_core/src/multiarray/dtypemeta.c | 6 ++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 56796e386f75..f9736610fb32 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ 
b/numpy/_core/src/multiarray/convert_datatype.c @@ -187,9 +187,12 @@ ensure_castingimpl_exists(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) NPY_NO_EXPORT PyObject * PyArray_GetCastingImpl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) { - PyObject *res; + PyObject *res = NULL; if (from == to) { - res = Py_XNewRef((PyObject *)NPY_DT_SLOTS(from)->within_dtype_castingimpl); + if ((NPY_DT_SLOTS(from)->within_dtype_castingimpl) != NULL) { + res = Py_XNewRef( + (PyObject *)NPY_DT_SLOTS(from)->within_dtype_castingimpl); + } } else if (PyDict_GetItemRef(NPY_DT_SLOTS(from)->castingimpls, (PyObject *)to, &res) < 0) { diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index a60e6fd59fd9..0b1b0fb39192 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -1252,6 +1252,12 @@ dtypemeta_wrap_legacy_descriptor( return -1; } } + else { + // ensure the within dtype cast is populated for legacy user dtypes + if (PyArray_GetCastingImpl(dtype_class, dtype_class) == NULL) { + return -1; + } + } return 0; } From e5411a3acd54008ec30fd7a59c1c1fb505a1dc2c Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Thu, 6 Feb 2025 15:16:52 -0700 Subject: [PATCH 089/187] MAINT: spawn fewer threads to hopefully fix 32 bit runners --- numpy/_core/tests/test_multithreading.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index e12edd0c15be..da8195b9f14d 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -260,4 +260,4 @@ def closure(b): b.wait() np.full((10, 10), 1, _rational_tests.rational) - run_threaded(closure, 1000, pass_barrier=True) + run_threaded(closure, 250, pass_barrier=True) From f3c47ac9fe6a90cbc7cfabe1bf6743a271d08c02 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 7 Feb 2025 12:53:57 -0700 Subject: [PATCH 090/187] BUG: fix reference 
counting error --- numpy/_core/src/multiarray/convert_datatype.c | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index f9736610fb32..2d8393f1b777 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -170,6 +170,7 @@ ensure_castingimpl_exists(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) if (from == to && res == Py_None) { PyErr_Format(PyExc_RuntimeError, "Internal NumPy error, within-DType cast missing for %S!", from); + Py_DECREF(res); return NULL; } return res; From 67f0402dc5041f17c56dc2ee3096bed78a9cbe40 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 7 Feb 2025 12:54:30 -0700 Subject: [PATCH 091/187] BUG: fix resource cleanup when thread spawning errors --- numpy/_core/tests/test_multithreading.py | 9 ++++++++- numpy/testing/_private/utils.py | 11 +++++++++-- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index da8195b9f14d..d65b84db22d9 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -260,4 +260,11 @@ def closure(b): b.wait() np.full((10, 10), 1, _rational_tests.rational) - run_threaded(closure, 250, pass_barrier=True) + try: + run_threaded(closure, 250, pass_barrier=True) + except RuntimeError: + # couldn't spawn enough threads, so skip this test on this system + # for whatever reason the 32 bit linux runner will trigger + # this. 
I can trigger it on my Linux laptop with 500 threads but + # the runner is more resource-constrained + pytest.skip("Couldn't spawn enough threads to run the test") diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 4a97ff111cd7..e20c5c529d20 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -2700,8 +2700,15 @@ def run_threaded(func, max_workers=8, pass_count=False, barrier = threading.Barrier(max_workers) args.append(barrier) if pass_count: - futures = [tpe.submit(func, i, *args) for i in range(max_workers)] + all_func_args = [(func, i, *args) for i in range(max_workers)] else: - futures = [tpe.submit(func, *args) for _ in range(max_workers)] + all_func_args = [(func, *args) for i in range(max_workers)] + try: + futures = [tpe.submit(*func_args) for func_args in + all_func_args] + except BaseException: + if pass_barrier: + barrier.abort() + raise for f in futures: f.result() From 425d162c15cc9cd0879269a96d8971ec1f632906 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 7 Feb 2025 13:31:20 -0700 Subject: [PATCH 092/187] MAINT: refactor run_threaded to use a try/finally block --- numpy/testing/_private/utils.py | 45 +++++++++++++++++++-------------- 1 file changed, 26 insertions(+), 19 deletions(-) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index e20c5c529d20..1cfd0e03cb0e 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -2690,25 +2690,32 @@ def run_threaded(func, max_workers=8, pass_count=False, prepare_args=None): """Runs a function many times in parallel""" for _ in range(outer_iterations): - with (concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) - as tpe): - if prepare_args is None: - args = [] - else: - args = prepare_args() - if pass_barrier: - barrier = threading.Barrier(max_workers) - args.append(barrier) - if pass_count: - all_func_args = [(func, i, *args) for i in range(max_workers)] - else: - 
all_func_args = [(func, *args) for i in range(max_workers)] - try: - futures = [tpe.submit(*func_args) for func_args in - all_func_args] - except BaseException: + if pass_barrier: + barrier = threading.Barrier(max_workers) + try: + with (concurrent.futures.ThreadPoolExecutor( + max_workers=max_workers) as tpe): + if prepare_args is None: + args = [] + else: + args = prepare_args() if pass_barrier: - barrier.abort() - raise + args.append(barrier) + if pass_count: + all_args = [(func, i, *args) for i in range(max_workers)] + else: + all_args = [(func, *args) for _ in range(max_workers)] + futures = [tpe.submit(*func_args) for func_args in all_args] for f in futures: f.result() + finally: + if pass_barrier: + barrier.abort() + + +def get_stringdtype_dtype(na_object, coerce=True): + # explicit is check for pd_NA because != with pd_NA returns pd_NA + if na_object is pd_NA or na_object != "unset": + return np.dtypes.StringDType(na_object=na_object, coerce=coerce) + else: + return np.dtypes.StringDType(coerce=coerce) From cac1f10a1da94f3a675f782a773cea7e182160d4 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 7 Feb 2025 13:39:10 -0700 Subject: [PATCH 093/187] MAINT: clean up slightly --- numpy/testing/_private/utils.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 1cfd0e03cb0e..b48fdafa9afc 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -2702,12 +2702,11 @@ def run_threaded(func, max_workers=8, pass_count=False, if pass_barrier: args.append(barrier) if pass_count: - all_args = [(func, i, *args) for i in range(max_workers)] + futures = [tpe.submit(func, i, *args) for i in range(max_workers)] else: - all_args = [(func, *args) for _ in range(max_workers)] - futures = [tpe.submit(*func_args) for func_args in all_args] - for f in futures: - f.result() + futures = [tpe.submit(func, *args) for _ in range(max_workers)] + for f in 
futures: + f.result() finally: if pass_barrier: barrier.abort() From e6f581dfc5cd8955aa37db0576e5678cad615a69 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 7 Feb 2025 13:40:44 -0700 Subject: [PATCH 094/187] MAINT: fix linter --- numpy/testing/_private/utils.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index b48fdafa9afc..e9da9663100b 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -2702,9 +2702,11 @@ def run_threaded(func, max_workers=8, pass_count=False, if pass_barrier: args.append(barrier) if pass_count: - futures = [tpe.submit(func, i, *args) for i in range(max_workers)] + futures = [ + tpe.submit(func, i, *args) for i in range(max_workers)] else: - futures = [tpe.submit(func, *args) for _ in range(max_workers)] + futures = [ + tpe.submit(func, *args) for _ in range(max_workers)] for f in futures: f.result() finally: From 3f8fbd6a7494078558897cafcd40c5288452fb72 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 7 Feb 2025 14:24:55 -0700 Subject: [PATCH 095/187] MAINT: go back to try/except --- numpy/testing/_private/utils.py | 41 +++++++++++++++++---------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index e9da9663100b..9343ab941a04 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -2690,28 +2690,29 @@ def run_threaded(func, max_workers=8, pass_count=False, prepare_args=None): """Runs a function many times in parallel""" for _ in range(outer_iterations): - if pass_barrier: - barrier = threading.Barrier(max_workers) - try: - with (concurrent.futures.ThreadPoolExecutor( - max_workers=max_workers) as tpe): - if prepare_args is None: - args = [] - else: - args = prepare_args() - if pass_barrier: - args.append(barrier) + with (concurrent.futures.ThreadPoolExecutor( + 
max_workers=max_workers) as tpe): + if prepare_args is None: + args = [] + else: + args = prepare_args() + if pass_barrier: + barrier = threading.Barrier(max_workers) + args.append(barrier) + try: if pass_count: - futures = [ - tpe.submit(func, i, *args) for i in range(max_workers)] + futures = [tpe.submit(func, i, *args) for i in + range(max_workers)] else: - futures = [ - tpe.submit(func, *args) for _ in range(max_workers)] - for f in futures: - f.result() - finally: - if pass_barrier: - barrier.abort() + futures = [tpe.submit(func, *args) for _ in + range(max_workers)] + except RuntimeError: + # python raises RuntimeError when it can't spawn new threads + if pass_barrier: + barrier.abort() + raise + for f in futures: + f.result() def get_stringdtype_dtype(na_object, coerce=True): From d4946475127870237d692df15edabb27d8fb2ef8 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 7 Feb 2025 15:26:37 -0700 Subject: [PATCH 096/187] MAINT: fix indentation and clarify comment --- numpy/_core/src/multiarray/convert_datatype.c | 4 ++-- numpy/_core/tests/test_multithreading.py | 9 +++++---- numpy/testing/_private/utils.py | 4 ++-- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 2d8393f1b777..bf8074e0250e 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -102,8 +102,8 @@ create_casting_impl(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) PyArray_VectorUnaryFunc *castfunc = PyArray_GetCastFunc( from->singleton, to->type_num); if (castfunc == NULL) { - PyErr_Clear(); - Py_RETURN_NONE; + PyErr_Clear(); + Py_RETURN_NONE; } } /* Create a cast using the state of the legacy casting setup defined diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index d65b84db22d9..f81d1f672f59 100644 --- a/numpy/_core/tests/test_multithreading.py +++ 
b/numpy/_core/tests/test_multithreading.py @@ -263,8 +263,9 @@ def closure(b): try: run_threaded(closure, 250, pass_barrier=True) except RuntimeError: - # couldn't spawn enough threads, so skip this test on this system - # for whatever reason the 32 bit linux runner will trigger - # this. I can trigger it on my Linux laptop with 500 threads but - # the runner is more resource-constrained + # The 32 bit linux runner will trigger this with 250 threads. I can + # trigger it on my Linux laptop with 500 threads but the CI runner is + # more resource-constrained. + # Reducing the number of threads means the test doesn't trigger the + # bug. Better to skip on some platforms than add a useless test. pytest.skip("Couldn't spawn enough threads to run the test") diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 9343ab941a04..1e631c30e5fd 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -2690,8 +2690,8 @@ def run_threaded(func, max_workers=8, pass_count=False, prepare_args=None): """Runs a function many times in parallel""" for _ in range(outer_iterations): - with (concurrent.futures.ThreadPoolExecutor( - max_workers=max_workers) as tpe): + with (concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) + as tpe): if prepare_args is None: args = [] else: From c20ac888de1d45c44c8d3a0e972a23e781a322ec Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Sat, 8 Feb 2025 06:11:23 -0700 Subject: [PATCH 097/187] MAINT: use a try/finally to make the deadlock protection more robust --- numpy/testing/_private/utils.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 1e631c30e5fd..4db01f86f2fa 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -2699,18 +2699,19 @@ def run_threaded(func, max_workers=8, pass_count=False, if pass_barrier: barrier = 
threading.Barrier(max_workers) args.append(barrier) + if pass_count: + all_args = [(func, i, *args) for i in range(max_workers)] + else: + all_args = [(func, *args) for i in range(max_workers)] try: - if pass_count: - futures = [tpe.submit(func, i, *args) for i in - range(max_workers)] - else: - futures = [tpe.submit(func, *args) for _ in - range(max_workers)] - except RuntimeError: - # python raises RuntimeError when it can't spawn new threads - if pass_barrier: + n_submitted = 0 + futures = [] + for arg in all_args: + futures.append(tpe.submit(*arg)) + n_submitted += 1 + finally: + if n_submitted < max_workers and pass_barrier: barrier.abort() - raise for f in futures: f.result() From 96ca7e3b248878b16ad197da395099033d06ddf8 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 11 Feb 2025 08:52:21 -0700 Subject: [PATCH 098/187] MAINT: respond to code review --- numpy/_core/src/multiarray/convert_datatype.c | 12 +++++------- numpy/testing/_private/utils.py | 4 +--- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index bf8074e0250e..00251af5bf68 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -150,17 +150,15 @@ ensure_castingimpl_exists(PyArray_DTypeMeta *from, PyArray_DTypeMeta *to) &res) < 0) { return_error = 1; } - - if (res == NULL && !return_error) { + else if (res == NULL) { res = create_casting_impl(from, to); if (res == NULL) { return_error = 1; } - } - if (!return_error && - PyDict_SetItem(NPY_DT_SLOTS(from)->castingimpls, - (PyObject *)to, res) < 0) { - return_error = 1; + else if (PyDict_SetItem(NPY_DT_SLOTS(from)->castingimpls, + (PyObject *)to, res) < 0) { + return_error = 1; + } } Py_END_CRITICAL_SECTION(); if (return_error) { diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 4db01f86f2fa..4b5106204f98 100644 --- 
a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -2704,13 +2704,11 @@ def run_threaded(func, max_workers=8, pass_count=False, else: all_args = [(func, *args) for i in range(max_workers)] try: - n_submitted = 0 futures = [] for arg in all_args: futures.append(tpe.submit(*arg)) - n_submitted += 1 finally: - if n_submitted < max_workers and pass_barrier: + if len(futures) < max_workers and pass_barrier: barrier.abort() for f in futures: f.result() From 48515a33c93234a50a5eaa13d8472e159a5d6fa0 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 11 Feb 2025 10:45:25 -0700 Subject: [PATCH 099/187] MAINT: Update some testing files from main - Checkout numpy/testing/_private/utils.py - Checkout numpy/_core/tests/test_multithreading.py - Checkout conftest.py - Update test_requirements.txt --- numpy/_core/tests/test_multithreading.py | 2 + numpy/conftest.py | 34 ++++++++++- numpy/testing/_private/utils.py | 77 ++++++++++++++++++------ requirements/test_requirements.txt | 1 + 4 files changed, 91 insertions(+), 23 deletions(-) diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index f81d1f672f59..133268d276ee 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -1,4 +1,6 @@ +import concurrent.futures import threading +import string import numpy as np import pytest diff --git a/numpy/conftest.py b/numpy/conftest.py index b37092296005..0eb42d1103e4 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -2,6 +2,7 @@ Pytest configuration and fixtures for the Numpy test suite. 
""" import os +import string import sys import tempfile from contextlib import contextmanager @@ -10,9 +11,11 @@ import hypothesis import pytest import numpy +import numpy as np from numpy._core._multiarray_tests import get_fpu_mode -from numpy.testing._private.utils import NOGIL_BUILD +from numpy._core.tests._natype import pd_NA +from numpy.testing._private.utils import NOGIL_BUILD, get_stringdtype_dtype try: from scipy_doctest.conftest import dt_config @@ -204,12 +207,12 @@ def warnings_errors_and_rng(test=None): dt_config.check_namespace['StringDType'] = numpy.dtypes.StringDType # temporary skips - dt_config.skiplist = set([ + dt_config.skiplist = { 'numpy.savez', # unclosed file 'numpy.matlib.savez', 'numpy.__array_namespace_info__', 'numpy.matlib.__array_namespace_info__', - ]) + } # xfail problematic tutorials dt_config.pytest_extra_xfail = { @@ -231,3 +234,28 @@ def warnings_errors_and_rng(test=None): 'numpy/f2py/_backends/_distutils.py', ] + +@pytest.fixture +def random_string_list(): + chars = list(string.ascii_letters + string.digits) + chars = np.array(chars, dtype="U1") + ret = np.random.choice(chars, size=100 * 10, replace=True) + return ret.view("U100") + + +@pytest.fixture(params=[True, False]) +def coerce(request): + return request.param + + +@pytest.fixture( + params=["unset", None, pd_NA, np.nan, float("nan"), "__nan__"], + ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"], +) +def na_object(request): + return request.param + + +@pytest.fixture() +def dtype(na_object, coerce): + return get_stringdtype_dtype(na_object, coerce) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 4b5106204f98..01fe6327713c 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -4,6 +4,7 @@ """ import os import sys +import pathlib import platform import re import gc @@ -19,6 +20,7 @@ import sysconfig import concurrent.futures import threading +import importlib.metadata import 
numpy as np from numpy._core import ( @@ -26,9 +28,11 @@ from numpy import isfinite, isnan, isinf import numpy.linalg._umath_linalg from numpy._utils import _rename_parameter +from numpy._core.tests._natype import pd_NA from io import StringIO + __all__ = [ 'assert_equal', 'assert_almost_equal', 'assert_approx_equal', 'assert_array_equal', 'assert_array_less', 'assert_string_equal', @@ -42,7 +46,7 @@ 'HAS_REFCOUNT', "IS_WASM", 'suppress_warnings', 'assert_array_compare', 'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON', 'IS_MUSL', 'check_support_sve', 'NOGIL_BUILD', - 'IS_EDITABLE', 'run_threaded', + 'IS_EDITABLE', 'IS_INSTALLED', 'NUMPY_ROOT', 'run_threaded', ] @@ -54,10 +58,40 @@ class KnownFailureException(Exception): KnownFailureTest = KnownFailureException # backwards compat verbose = 0 +NUMPY_ROOT = pathlib.Path(np.__file__).parent + +try: + np_dist = importlib.metadata.distribution('numpy') +except importlib.metadata.PackageNotFoundError: + IS_INSTALLED = IS_EDITABLE = False +else: + IS_INSTALLED = True + try: + if sys.version_info >= (3, 13): + IS_EDITABLE = np_dist.origin.dir_info.editable + else: + # Backport importlib.metadata.Distribution.origin + import json, types # noqa: E401 + origin = json.loads( + np_dist.read_text('direct_url.json') or '{}', + object_hook=lambda data: types.SimpleNamespace(**data), + ) + IS_EDITABLE = origin.dir_info.editable + except AttributeError: + IS_EDITABLE = False + + # spin installs numpy directly via meson, instead of using meson-python, and + # runs the module by setting PYTHONPATH. This is problematic because the + # resulting installation lacks the Python metadata (.dist-info), and numpy + # might already be installed on the environment, causing us to find its + # metadata, even though we are not actually loading that package. + # Work around this issue by checking if the numpy root matches. 
+ if not IS_EDITABLE and np_dist.locate_file('numpy') != NUMPY_ROOT: + IS_INSTALLED = False + IS_WASM = platform.machine() in ["wasm32", "wasm64"] IS_PYPY = sys.implementation.name == 'pypy' IS_PYSTON = hasattr(sys, "pyston_version_info") -IS_EDITABLE = not bool(np.__path__) or 'editable' in np.__path__[0] HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64 @@ -101,14 +135,15 @@ def GetPerformanceAttributes(object, counter, instance=None, # thread's CPU usage is either 0 or 100). To read counters like this, # you should copy this function, but keep the counter open, and call # CollectQueryData() each time you need to know. - # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp (dead link) + # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp + #(dead link) # My older explanation for this was that the "AddCounter" process # forced the CPU to 100%, but the above makes more sense :) import win32pdh if format is None: format = win32pdh.PDH_FMT_LONG - path = win32pdh.MakeCounterPath( (machine, object, instance, None, - inum, counter)) + path = win32pdh.MakeCounterPath((machine, object, instance, None, + inum, counter)) hq = win32pdh.OpenQuery() try: hc = win32pdh.AddCounter(hq, path) @@ -166,7 +201,7 @@ def jiffies(_proc_pid_stat=f'/proc/{os.getpid()}/stat', _load_time=[]): l = f.readline().split(' ') return int(l[13]) except Exception: - return int(100*(time.time()-_load_time[0])) + return int(100 * (time.time() - _load_time[0])) else: # os.getpid is not in all platforms available. 
# Using time is safe but inaccurate, especially when process @@ -182,7 +217,7 @@ def jiffies(_load_time=[]): import time if not _load_time: _load_time.append(time.time()) - return int(100*(time.time()-_load_time[0])) + return int(100 * (time.time() - _load_time[0])) def build_err_msg(arrays, err_msg, header='Items are not equal:', @@ -190,7 +225,7 @@ def build_err_msg(arrays, err_msg, header='Items are not equal:', msg = ['\n' + header] err_msg = str(err_msg) if err_msg: - if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header): + if err_msg.find('\n') == -1 and len(err_msg) < 79 - len(header): msg = [msg[0] + ' ' + err_msg] else: msg.append(err_msg) @@ -659,14 +694,14 @@ def assert_approx_equal(actual, desired, significant=7, err_msg='', # Normalized the numbers to be in range (-10.0,10.0) # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual)))))) with np.errstate(invalid='ignore'): - scale = 0.5*(np.abs(desired) + np.abs(actual)) + scale = 0.5 * (np.abs(desired) + np.abs(actual)) scale = np.power(10, np.floor(np.log10(scale))) try: - sc_desired = desired/scale + sc_desired = desired / scale except ZeroDivisionError: sc_desired = 0.0 try: - sc_actual = actual/scale + sc_actual = actual / scale except ZeroDivisionError: sc_actual = 0.0 msg = build_err_msg( @@ -687,7 +722,7 @@ def assert_approx_equal(actual, desired, significant=7, err_msg='', return except (TypeError, NotImplementedError): pass - if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)): + if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant - 1)): raise AssertionError(msg) @@ -1379,10 +1414,10 @@ def check_support_sve(__cache=[]): """ gh-22982 """ - + if __cache: return __cache[0] - + import subprocess cmd = 'lscpu' try: @@ -1543,7 +1578,7 @@ def measure(code_str, times=1, label=None): i += 1 exec(code, globs, locs) elapsed = jiffies() - elapsed - return 0.01*elapsed + return 0.01 * elapsed def _assert_valid_refcount(op): @@ -1557,7 +1592,7 @@ def 
_assert_valid_refcount(op): import gc import numpy as np - b = np.arange(100*100).reshape(100, 100) + b = np.arange(100 * 100).reshape(100, 100) c = b i = 1 @@ -1735,7 +1770,7 @@ def assert_array_almost_equal_nulp(x, y, nulp=1): ax = np.abs(x) ay = np.abs(y) ref = nulp * np.spacing(np.where(ax > ay, ax, ay)) - if not np.all(np.abs(x-y) <= ref): + if not np.all(np.abs(x - y) <= ref): if np.iscomplexobj(x) or np.iscomplexobj(y): msg = f"Arrays are not equal to {nulp} ULP" else: @@ -1851,7 +1886,7 @@ def nulp_diff(x, y, dtype=None): (x.shape, y.shape)) def _diff(rx, ry, vdt): - diff = np.asarray(rx-ry, dtype=vdt) + diff = np.asarray(rx - ry, dtype=vdt) return np.abs(diff) rx = integer_repr(x) @@ -2596,7 +2631,7 @@ def check_free_memory(free_bytes): except ValueError as exc: raise ValueError(f'Invalid environment variable {env_var}: {exc}') - msg = (f'{free_bytes/1e9} GB memory required, but environment variable ' + msg = (f'{free_bytes / 1e9} GB memory required, but environment variable ' f'NPY_AVAILABLE_MEM={env_value} set') else: mem_free = _get_mem_available() @@ -2607,7 +2642,9 @@ def check_free_memory(free_bytes): "the test.") mem_free = -1 else: - msg = f'{free_bytes/1e9} GB memory required, but {mem_free/1e9} GB available' + free_bytes_gb = free_bytes / 1e9 + mem_free_gb = mem_free / 1e9 + msg = f'{free_bytes_gb} GB memory required, but {mem_free_gb} GB available' return msg if mem_free < free_bytes else None diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 7ea464dadc40..93e441f61310 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -9,6 +9,7 @@ pytest-cov==4.1.0 meson ninja; sys_platform != "emscripten" pytest-xdist +pytest-timeout # for numpy.random.test.test_extending cffi; python_version < '3.10' # For testing types. 
Notes on the restrictions: From 633874632a26e0af9b225608eff7abec31c92a87 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 12 Feb 2025 07:34:25 -0700 Subject: [PATCH 100/187] CI: update test_moderately_small_alpha [wheel build] The test_moderately_small_alpha dirichlet test fails for 32-bit Windows Python3.13t wheel builds due to a memory error. Decrease the size of the test array by a bit. Note that this test is only run for the mt19937 generator. --- numpy/random/tests/test_generator_mt19937.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index 514f9af2ce8c..c9dc81e96a37 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -1244,7 +1244,7 @@ def test_dirichlet_small_alpha(self): @pytest.mark.slow def test_dirichlet_moderately_small_alpha(self): # Use alpha.max() < 0.1 to trigger stick breaking code path - alpha = np.array([0.02, 0.04, 0.03]) + alpha = np.array([0.02, 0.04]) exact_mean = alpha / alpha.sum() random = Generator(MT19937(self.seed)) sample = random.dirichlet(alpha, size=20000000) From 5ab0f7140ffe48c4e424f13b0207c28dda974547 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Wed, 5 Feb 2025 17:44:57 -0700 Subject: [PATCH 101/187] REL: Prepare for the NumPy 2.2.3 release [wheel build] - Create 2.2.3-changelog.rst. - Update 2.2.3-notes.rst. --- doc/changelog/2.2.3-changelog.rst | 43 ++++++++++++++++++++ doc/source/release/2.2.3-notes.rst | 63 ++++++++++++++++++++++++------ 2 files changed, 93 insertions(+), 13 deletions(-) create mode 100644 doc/changelog/2.2.3-changelog.rst diff --git a/doc/changelog/2.2.3-changelog.rst b/doc/changelog/2.2.3-changelog.rst new file mode 100644 index 000000000000..2cb6e99eec51 --- /dev/null +++ b/doc/changelog/2.2.3-changelog.rst @@ -0,0 +1,43 @@ + +Contributors +============ + +A total of 9 people contributed to this release. 
People with a "+" by their +names contributed a patch for the first time. + +* !amotzop +* Charles Harris +* Chris Sidebottom +* Joren Hammudoglu +* Matthew Brett +* Nathan Goldbaum +* Raghuveer Devulapalli +* Sebastian Berg +* Yakov Danishevsky + + +Pull requests merged +==================== + +A total of 21 pull requests were merged for this release. + +* `#28185 `__: MAINT: Prepare 2.2.x for further development +* `#28201 `__: BUG: fix data race in a more minimal way on stable branch +* `#28208 `__: BUG: Fix ``from_float_positional`` errors for huge pads +* `#28209 `__: BUG: fix data race in np.repeat +* `#28212 `__: MAINT: Use VQSORT_COMPILER_COMPATIBLE to determine if we should... +* `#28224 `__: MAINT: update highway to latest +* `#28236 `__: BUG: Add cpp atomic support (#28234) +* `#28237 `__: BLD: Compile fix for clang-cl on WoA +* `#28243 `__: TYP: Avoid upcasting ``float64`` in the set-ops +* `#28249 `__: BLD: better fix for clang / ARM compiles +* `#28266 `__: TYP: Fix ``timedelta64.__divmod__`` and ``timedelta64.__mod__``... 
+* `#28274 `__: TYP: Fixed missing typing information of set_printoptions +* `#28278 `__: BUG: backport resource cleanup bugfix from gh-28273 +* `#28282 `__: BUG: fix incorrect bytes to stringdtype coercion +* `#28283 `__: TYP: Fix scalar constructors +* `#28284 `__: TYP: stub ``numpy.matlib`` +* `#28285 `__: TYP: stub the missing ``numpy.testing`` modules +* `#28286 `__: CI: Fix the github label for ``TYP:`` PR's and issues +* `#28305 `__: TYP: Backport typing updates from main +* `#28321 `__: BUG: fix race initializing legacy dtype casts +* `#28324 `__: CI: update test_moderately_small_alpha diff --git a/doc/source/release/2.2.3-notes.rst b/doc/source/release/2.2.3-notes.rst index 3c5a25668c1c..cf21d751ec00 100644 --- a/doc/source/release/2.2.3-notes.rst +++ b/doc/source/release/2.2.3-notes.rst @@ -4,16 +4,53 @@ NumPy 2.2.3 Release Notes ========================== - -Highlights -========== - -*We'll choose highlights for this release near the end of the release cycle.* - - -.. if release snippets have been incorporated already, uncomment the follow - line (leave the `.. include:: directive) - -.. **Content from release note snippets in doc/release/upcoming_changes:** - -.. include:: notes-towncrier.rst +NumPy 2.2.3 is a patch release that fixes bugs found after the 2.2.2 release. +The majority of the changes are typing improvements and fixes for free +threaded Python. Both of those areas are still under development, so if you +discover new problems, please report them. + +This release supports Python versions 3.10-3.13. + + +Contributors +============ + +A total of 9 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* !amotzop +* Charles Harris +* Chris Sidebottom +* Joren Hammudoglu +* Matthew Brett +* Nathan Goldbaum +* Raghuveer Devulapalli +* Sebastian Berg +* Yakov Danishevsky + + +Pull requests merged +==================== + +A total of 21 pull requests were merged for this release. 
+ +* `#28185 `__: MAINT: Prepare 2.2.x for further development +* `#28201 `__: BUG: fix data race in a more minimal way on stable branch +* `#28208 `__: BUG: Fix ``from_float_positional`` errors for huge pads +* `#28209 `__: BUG: fix data race in np.repeat +* `#28212 `__: MAINT: Use VQSORT_COMPILER_COMPATIBLE to determine if we should... +* `#28224 `__: MAINT: update highway to latest +* `#28236 `__: BUG: Add cpp atomic support (#28234) +* `#28237 `__: BLD: Compile fix for clang-cl on WoA +* `#28243 `__: TYP: Avoid upcasting ``float64`` in the set-ops +* `#28249 `__: BLD: better fix for clang / ARM compiles +* `#28266 `__: TYP: Fix ``timedelta64.__divmod__`` and ``timedelta64.__mod__``... +* `#28274 `__: TYP: Fixed missing typing information of set_printoptions +* `#28278 `__: BUG: backport resource cleanup bugfix from gh-28273 +* `#28282 `__: BUG: fix incorrect bytes to stringdtype coercion +* `#28283 `__: TYP: Fix scalar constructors +* `#28284 `__: TYP: stub ``numpy.matlib`` +* `#28285 `__: TYP: stub the missing ``numpy.testing`` modules +* `#28286 `__: CI: Fix the github label for ``TYP:`` PR's and issues +* `#28305 `__: TYP: Backport typing updates from main +* `#28321 `__: BUG: fix race initializing legacy dtype casts +* `#28324 `__: CI: update test_moderately_small_alpha From 2e446a01f89533a24291e519724e0e855da83ede Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 13 Feb 2025 10:36:35 -0700 Subject: [PATCH 102/187] MAINT: Prepare 2.2.x for further development. 
- Create 2.2.4-notes.rst - Update release.rst - Update pavement.rst - Update pyproject.toml [skip azp] [skip cirrus] [skip actions] --- doc/source/release.rst | 1 + doc/source/release/2.2.4-notes.rst | 20 ++++++++++++++++++++ pavement.py | 2 +- pyproject.toml | 2 +- 4 files changed, 23 insertions(+), 2 deletions(-) create mode 100644 doc/source/release/2.2.4-notes.rst diff --git a/doc/source/release.rst b/doc/source/release.rst index a22178a055ee..13413f3a1b83 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,7 @@ Release notes .. toctree:: :maxdepth: 2 + 2.2.4 2.2.3 2.2.2 2.2.1 diff --git a/doc/source/release/2.2.4-notes.rst b/doc/source/release/2.2.4-notes.rst new file mode 100644 index 000000000000..9cde851301f9 --- /dev/null +++ b/doc/source/release/2.2.4-notes.rst @@ -0,0 +1,20 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.2.4 Release Notes +========================== + +NumPy 2.2.4 is a patch release that fixes bugs found after the 2.2.3 release. + +Highlights +========== + +*We'll choose highlights for this release near the end of the release cycle.* + + +.. if release snippets have been incorporated already, uncomment the follow + line (leave the `.. include:: directive) + +.. **Content from release note snippets in doc/release/upcoming_changes:** + +.. 
include:: notes-towncrier.rst diff --git a/pavement.py b/pavement.py index 6b6a0668b7a1..e3e778d4bbfc 100644 --- a/pavement.py +++ b/pavement.py @@ -36,7 +36,7 @@ #----------------------------------- # Path to the release notes -RELEASE_NOTES = 'doc/source/release/2.2.3-notes.rst' +RELEASE_NOTES = 'doc/source/release/2.2.4-notes.rst' #------------------------------------------------------- diff --git a/pyproject.toml b/pyproject.toml index b4f39af4d56c..3d9889d2eeed 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.2.3" +version = "2.2.4" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"} From 92d561e008c430bcacdf5e763084d08f06238ed0 Mon Sep 17 00:00:00 2001 From: Andrej730 Date: Thu, 13 Feb 2025 12:31:37 +0500 Subject: [PATCH 103/187] TYP: Fix missing typing arguments flags --- numpy/_core/numeric.pyi | 4 ++++ numpy/linalg/_linalg.pyi | 26 +++++++++++++++++++++++--- 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index d23300752cd7..799ee7e17add 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -874,6 +874,8 @@ def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ... def astype( x: ndarray[_ShapeType, dtype[Any]], dtype: _DTypeLike[_SCT], + /, + *, copy: bool = ..., device: None | L["cpu"] = ..., ) -> ndarray[_ShapeType, dtype[_SCT]]: ... @@ -881,6 +883,8 @@ def astype( def astype( x: ndarray[_ShapeType, dtype[Any]], dtype: DTypeLike, + /, + *, copy: bool = ..., device: None | L["cpu"] = ..., ) -> ndarray[_ShapeType, dtype[Any]]: ... diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index d3ca3eb701b7..550cab44cf49 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -176,11 +176,11 @@ def matrix_power( ) -> NDArray[Any]: ... @overload -def cholesky(a: _ArrayLikeInt_co) -> NDArray[float64]: ... 
+def cholesky(a: _ArrayLikeInt_co, /, *, upper: bool = False) -> NDArray[float64]: ... @overload -def cholesky(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... +def cholesky(a: _ArrayLikeFloat_co, /, *, upper: bool = False) -> NDArray[floating[Any]]: ... @overload -def cholesky(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... +def cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> NDArray[complexfloating[Any, Any]]: ... @overload def outer(x1: _ArrayLikeUnknown, x2: _ArrayLikeUnknown) -> NDArray[Any]: ... @@ -373,12 +373,16 @@ def norm( @overload def matrix_norm( x: ArrayLike, + /, + *, ord: None | float | L["fro", "nuc"] = ..., keepdims: bool = ..., ) -> floating[Any]: ... @overload def matrix_norm( x: ArrayLike, + /, + *, ord: None | float | L["fro", "nuc"] = ..., keepdims: bool = ..., ) -> Any: ... @@ -386,6 +390,8 @@ def matrix_norm( @overload def vector_norm( x: ArrayLike, + /, + *, axis: None = ..., ord: None | float = ..., keepdims: bool = ..., @@ -393,6 +399,8 @@ def vector_norm( @overload def vector_norm( x: ArrayLike, + /, + *, axis: SupportsInt | SupportsIndex | tuple[int, ...] = ..., ord: None | float = ..., keepdims: bool = ..., @@ -407,11 +415,15 @@ def multi_dot( def diagonal( x: ArrayLike, # >= 2D array + /, + *, offset: SupportsIndex = ..., ) -> NDArray[Any]: ... def trace( x: ArrayLike, # >= 2D array + /, + *, offset: SupportsIndex = ..., dtype: DTypeLike = ..., ) -> Any: ... @@ -420,24 +432,32 @@ def trace( def cross( a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, + /, + *, axis: int = ..., ) -> NDArray[unsignedinteger[Any]]: ... @overload def cross( a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, + /, + *, axis: int = ..., ) -> NDArray[signedinteger[Any]]: ... @overload def cross( a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, + /, + *, axis: int = ..., ) -> NDArray[floating[Any]]: ... 
@overload def cross( a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, + /, + *, axis: int = ..., ) -> NDArray[complexfloating[Any, Any]]: ... From a1713351f152cbbe80ef3b886354af69f61d7049 Mon Sep 17 00:00:00 2001 From: Andrej730 Date: Sat, 15 Feb 2025 01:02:16 +0500 Subject: [PATCH 104/187] TYP: Fix mismatching np.cross and np.linalg.cross typing arguments names --- numpy/_core/numeric.pyi | 28 ++++++++++++++-------------- numpy/linalg/_linalg.pyi | 16 ++++++++-------- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index 799ee7e17add..d97ee6e4f649 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -699,8 +699,8 @@ def moveaxis( @overload def cross( - x1: _ArrayLikeUnknown, - x2: _ArrayLikeUnknown, + a: _ArrayLikeUnknown, + b: _ArrayLikeUnknown, axisa: int = ..., axisb: int = ..., axisc: int = ..., @@ -708,8 +708,8 @@ def cross( ) -> NDArray[Any]: ... @overload def cross( - x1: _ArrayLikeBool_co, - x2: _ArrayLikeBool_co, + a: _ArrayLikeBool_co, + b: _ArrayLikeBool_co, axisa: int = ..., axisb: int = ..., axisc: int = ..., @@ -717,8 +717,8 @@ def cross( ) -> NoReturn: ... @overload def cross( - x1: _ArrayLikeUInt_co, - x2: _ArrayLikeUInt_co, + a: _ArrayLikeUInt_co, + b: _ArrayLikeUInt_co, axisa: int = ..., axisb: int = ..., axisc: int = ..., @@ -726,8 +726,8 @@ def cross( ) -> NDArray[unsignedinteger[Any]]: ... @overload def cross( - x1: _ArrayLikeInt_co, - x2: _ArrayLikeInt_co, + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, axisa: int = ..., axisb: int = ..., axisc: int = ..., @@ -735,8 +735,8 @@ def cross( ) -> NDArray[signedinteger[Any]]: ... @overload def cross( - x1: _ArrayLikeFloat_co, - x2: _ArrayLikeFloat_co, + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, axisa: int = ..., axisb: int = ..., axisc: int = ..., @@ -744,8 +744,8 @@ def cross( ) -> NDArray[floating[Any]]: ... 
@overload def cross( - x1: _ArrayLikeComplex_co, - x2: _ArrayLikeComplex_co, + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, axisa: int = ..., axisb: int = ..., axisc: int = ..., @@ -753,8 +753,8 @@ def cross( ) -> NDArray[complexfloating[Any, Any]]: ... @overload def cross( - x1: _ArrayLikeObject_co, - x2: _ArrayLikeObject_co, + a: _ArrayLikeObject_co, + b: _ArrayLikeObject_co, axisa: int = ..., axisb: int = ..., axisc: int = ..., diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 550cab44cf49..8ac75a47f5f8 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -430,32 +430,32 @@ def trace( @overload def cross( - a: _ArrayLikeUInt_co, - b: _ArrayLikeUInt_co, + x1: _ArrayLikeUInt_co, + x2: _ArrayLikeUInt_co, /, *, axis: int = ..., ) -> NDArray[unsignedinteger[Any]]: ... @overload def cross( - a: _ArrayLikeInt_co, - b: _ArrayLikeInt_co, + x1: _ArrayLikeInt_co, + x2: _ArrayLikeInt_co, /, *, axis: int = ..., ) -> NDArray[signedinteger[Any]]: ... @overload def cross( - a: _ArrayLikeFloat_co, - b: _ArrayLikeFloat_co, + x1: _ArrayLikeFloat_co, + x2: _ArrayLikeFloat_co, /, *, axis: int = ..., ) -> NDArray[floating[Any]]: ... 
@overload def cross( - a: _ArrayLikeComplex_co, - b: _ArrayLikeComplex_co, + x1: _ArrayLikeComplex_co, + x2: _ArrayLikeComplex_co, /, *, axis: int = ..., From d8553b8588cf3adcd8948084f77fc6b7b83e67dc Mon Sep 17 00:00:00 2001 From: Abhishek Kumar <142383124+abhishek-iitmadras@users.noreply.github.com> Date: Thu, 13 Feb 2025 14:38:11 +0530 Subject: [PATCH 105/187] CI: Update FreeBSD base image in `cirrus_arm.yml` (#28328) --- tools/ci/cirrus_arm.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/ci/cirrus_arm.yml b/tools/ci/cirrus_arm.yml index 46fed5bbf0c4..180770451c44 100644 --- a/tools/ci/cirrus_arm.yml +++ b/tools/ci/cirrus_arm.yml @@ -67,7 +67,7 @@ freebsd_test_task: use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true' compute_engine_instance: image_project: freebsd-org-cloud-dev - image: family/freebsd-14-0 + image: family/freebsd-14-2 platform: freebsd cpu: 1 memory: 4G From 8937d9478c46af92ec6dc0dd71c6443aa7cc34ee Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Fri, 21 Feb 2025 16:55:32 +0100 Subject: [PATCH 106/187] MAINT: No need to check for check for FPEs in casts to/from object (#28358) * MAINT: No need to check for check for FPEs in casts to/from object Since these go via Python (in some form) and Python doesn't use FPEs we can be sure that we don't need to check for FPEs. Note that while it hides *almost always* spurious FPEs seen on some platforms, there could be certain chains or multiple cast situations where FPEs are checked for other reasons and the spurious FPE will show up. So it "somewhat": Closes gh-28351 * MAINT: Follow-up, got the wrong place (the other is OK). * DOC: Add a small comment as per review request I don't think it needs a comment that we can do this, but maybe it is nice to say that there was a reason for it. 
--- numpy/_core/src/multiarray/convert_datatype.c | 8 ++++++-- numpy/_core/src/multiarray/dtype_transfer.c | 7 ++++--- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 00251af5bf68..158c9ed207b5 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -3517,7 +3517,9 @@ initialize_void_and_object_globals(void) { method->nin = 1; method->nout = 1; method->name = "object_to_any_cast"; - method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; + method->flags = (NPY_METH_SUPPORTS_UNALIGNED + | NPY_METH_REQUIRES_PYAPI + | NPY_METH_NO_FLOATINGPOINT_ERRORS); method->casting = NPY_UNSAFE_CASTING; method->resolve_descriptors = &object_to_any_resolve_descriptors; method->get_strided_loop = &object_to_any_get_loop; @@ -3532,7 +3534,9 @@ initialize_void_and_object_globals(void) { method->nin = 1; method->nout = 1; method->name = "any_to_object_cast"; - method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; + method->flags = (NPY_METH_SUPPORTS_UNALIGNED + | NPY_METH_REQUIRES_PYAPI + | NPY_METH_NO_FLOATINGPOINT_ERRORS); method->casting = NPY_SAFE_CASTING; method->resolve_descriptors = &any_to_object_resolve_descriptors; method->get_strided_loop = &any_to_object_get_loop; diff --git a/numpy/_core/src/multiarray/dtype_transfer.c b/numpy/_core/src/multiarray/dtype_transfer.c index d7a5e80800b6..188a55a4b5f5 100644 --- a/numpy/_core/src/multiarray/dtype_transfer.c +++ b/numpy/_core/src/multiarray/dtype_transfer.c @@ -235,8 +235,8 @@ any_to_object_get_loop( NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - - *flags = NPY_METH_REQUIRES_PYAPI; /* No need for floating point errors */ + /* Python API doesn't use FPEs and this also attempts to hide spurious ones. 
*/ + *flags = NPY_METH_REQUIRES_PYAPI | NPY_METH_NO_FLOATINGPOINT_ERRORS; *out_loop = _strided_to_strided_any_to_object; *out_transferdata = PyMem_Malloc(sizeof(_any_to_object_auxdata)); @@ -342,7 +342,8 @@ object_to_any_get_loop( NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - *flags = NPY_METH_REQUIRES_PYAPI; + /* Python API doesn't use FPEs and this also attempts to hide spurious ones. */ + *flags = NPY_METH_REQUIRES_PYAPI | NPY_METH_NO_FLOATINGPOINT_ERRORS; /* NOTE: auxdata is only really necessary to flag `move_references` */ _object_to_any_auxdata *data = PyMem_Malloc(sizeof(*data)); From 363135470f2ea68950117cdf2469ac961430baa0 Mon Sep 17 00:00:00 2001 From: Giovanni Del Monte Date: Tue, 18 Feb 2025 15:48:48 +0100 Subject: [PATCH 107/187] BUG: numpy.loadtxt reads only 50000 lines when skip_rows >= max_rows (#28319) * fixed bug in function _read in numpy/lib/_npyio_impl.py, misnamed variable skiplines as skiprows; added test in numpy/lib/tests/test_loadtxt.py * fixed sintax in test_loadtxt.py * changed use of mkstemp with use of tmpdir provided by pytest * fixed bug in use of tmpdir in loadtxt test * Update numpy/lib/tests/test_loadtxt.py Co-authored-by: Sebastian Berg * Update file numpy/lib/tests/test_loadtxt.py * Update file numpy/lib/tests/test_loadtxt.py * Update numpy/lib/tests/test_loadtxt.py --------- Co-authored-by: Sebastian Berg --- numpy/lib/_npyio_impl.py | 2 +- numpy/lib/tests/test_loadtxt.py | 25 +++++++++++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index f0d1bb2b0c68..4dc3a4b9b7e2 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -1084,7 +1084,7 @@ def _read(fname, *, delimiter=',', comment='#', quote='"', # be adapted (in principle the concatenate could cast). 
chunks.append(next_arr.astype(read_dtype_via_object_chunks)) - skiprows = 0 # Only have to skip for first chunk + skiplines = 0 # Only have to skip for first chunk if max_rows >= 0: max_rows -= chunk_size if len(next_arr) < chunk_size: diff --git a/numpy/lib/tests/test_loadtxt.py b/numpy/lib/tests/test_loadtxt.py index 116cd1608da3..60717be3bd9a 100644 --- a/numpy/lib/tests/test_loadtxt.py +++ b/numpy/lib/tests/test_loadtxt.py @@ -1073,3 +1073,28 @@ def test_maxrows_exceeding_chunksize(nmax): res = np.loadtxt(fname, dtype=str, delimiter=" ", max_rows=nmax) os.remove(fname) assert len(res) == nmax + +@pytest.mark.parametrize("nskip", (0, 10000, 12345, 50000, 67891, 100000)) +def test_skiprow_exceeding_maxrows_exceeding_chunksize(tmpdir, nskip): + # tries to read a file in chunks by skipping a variable amount of lines, + # less, equal, greater than max_rows + file_length = 110000 + data = "\n".join(f"{i} a 0.5 1" for i in range(1, file_length + 1)) + expected_length = min(60000, file_length - nskip) + expected = np.arange(nskip + 1, nskip + 1 + expected_length).astype(str) + + # file-like path + txt = StringIO(data) + res = np.loadtxt(txt, dtype='str', delimiter=" ", skiprows=nskip, max_rows=60000) + assert len(res) == expected_length + # are the right lines read in res? + assert_array_equal(expected, res[:, 0]) + + # file-obj path + tmp_file = tmpdir / "test_data.txt" + tmp_file.write(data) + fname = str(tmp_file) + res = np.loadtxt(fname, dtype='str', delimiter=" ", skiprows=nskip, max_rows=60000) + assert len(res) == expected_length + # are the right lines read in res? 
+ assert_array_equal(expected, res[:, 0]) From 4615d543b492b10c8935cf6ef97d3bc798fbb53a Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Thu, 20 Feb 2025 14:55:52 +0100 Subject: [PATCH 108/187] [ENH] add multi-threading test for np.nonzero --- numpy/_core/tests/test_multithreading.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 133268d276ee..de283df6a44b 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -271,3 +271,21 @@ def closure(b): # Reducing the number of threads means the test doesn't trigger the # bug. Better to skip on some platforms than add a useless test. pytest.skip("Couldn't spawn enough threads to run the test") + + +def test_nonzero(): + # np.nonzero uses np.count_nonzero to determine the size of the output array + # In a second pass the indices of the non-zero elements are determined, but they can have changed + + for dtype in [bool, int, float]: + x= np.random.randint(4, size=10_000).astype(dtype) + + def func(seed): + x[::2] = np.random.randint(2) + try: + r = np.nonzero(x) + assert r[0].min() >= 0 + except RuntimeError as ex: + assert 'number of non-zero array elements changed during function execution' in str(ex) + + run_threaded(func, max_workers=10, pass_count=True, outer_iterations=10) From f5069056a336572611794fe1dd6e748f1d35c528 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Thu, 20 Feb 2025 16:08:21 +0100 Subject: [PATCH 109/187] lint --- numpy/_core/tests/test_multithreading.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index de283df6a44b..e49996ab887f 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -278,7 +278,7 @@ def test_nonzero(): # In a second pass the indices of the non-zero elements are 
determined, but they can have changed for dtype in [bool, int, float]: - x= np.random.randint(4, size=10_000).astype(dtype) + x = np.random.randint(4, size=10_000).astype(dtype) def func(seed): x[::2] = np.random.randint(2) From 1b73bdddb3b7ca31702f79502b7212e7467162c9 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Thu, 20 Feb 2025 16:12:36 +0100 Subject: [PATCH 110/187] split test --- numpy/_core/tests/test_multithreading.py | 56 ++++++++++++++++++------ 1 file changed, 42 insertions(+), 14 deletions(-) diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index e49996ab887f..013690c8d101 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -272,20 +272,48 @@ def closure(b): # bug. Better to skip on some platforms than add a useless test. pytest.skip("Couldn't spawn enough threads to run the test") +def test_nonzero_bool(): + # np.nonzero uses np.count_nonzero to determine the size of the output array + # In a second pass the indices of the non-zero elements are determined, but they can have changed + x = np.random.randint(4, size=10_000).astype(bool) + + def func(seed): + x[::2] = np.random.randint(2) + try: + r = np.nonzero(x) + assert r[0].min() >= 0 + except RuntimeError as ex: + assert 'number of non-zero array elements changed during function execution' in str(ex) -def test_nonzero(): + run_threaded(func, max_workers=10, pass_count=True, outer_iterations=10) + +def test_nonzero_int(): # np.nonzero uses np.count_nonzero to determine the size of the output array # In a second pass the indices of the non-zero elements are determined, but they can have changed - - for dtype in [bool, int, float]: - x = np.random.randint(4, size=10_000).astype(dtype) - - def func(seed): - x[::2] = np.random.randint(2) - try: - r = np.nonzero(x) - assert r[0].min() >= 0 - except RuntimeError as ex: - assert 'number of non-zero array elements changed during function execution' in 
str(ex) - - run_threaded(func, max_workers=10, pass_count=True, outer_iterations=10) + x = np.random.randint(4, size=10_000).astype(int) + + def func(seed): + x[::2] = np.random.randint(2) + try: + r = np.nonzero(x) + assert r[0].min() >= 0 + except RuntimeError as ex: + assert 'number of non-zero array elements changed during function execution' in str(ex) + + run_threaded(func, max_workers=10, pass_count=True, outer_iterations=10) + +def test_nonzero_float(): + # np.nonzero uses np.count_nonzero to determine the size of the output array + # In a second pass the indices of the non-zero elements are determined, but they can have changed + x = np.random.randint(4, size=10_000).astype(float) + + def func(seed): + x[::2] = np.random.randint(2) + try: + r = np.nonzero(x) + assert r[0].min() >= 0 + except RuntimeError as ex: + assert 'number of non-zero array elements changed during function execution' in str(ex) + + run_threaded(func, max_workers=10, pass_count=True, outer_iterations=10) + From ec14eff2c4b817db323c4275be71489b1db4bb35 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Thu, 20 Feb 2025 17:06:20 +0100 Subject: [PATCH 111/187] fix buffer overflow --- numpy/_core/src/multiarray/item_selection.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index fbcc0f7b162c..f71af3b0ed03 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -2896,7 +2896,8 @@ PyArray_Nonzero(PyArrayObject *self) if (((double)nonzero_count / count) <= 0.1) { npy_intp subsize; npy_intp j = 0; - while (1) { + npy_intp * multi_index_end = multi_index + nonzero_count; + while (multi_index < multi_index_end) { npy_memchr(data + j * stride, 0, stride, count - j, &subsize, 1); j += subsize; From df22d5dced832e8893d1dd200ef0dedf761d89b9 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Thu, 20 Feb 2025 19:35:21 +0100 Subject: 
[PATCH 112/187] fix one more buffer overflow --- numpy/_core/src/multiarray/item_selection.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index f71af3b0ed03..fcd78a28b365 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -2916,7 +2916,7 @@ PyArray_Nonzero(PyArrayObject *self) npy_intp j = 0; /* Manually unroll for GCC and maybe other compilers */ - while (multi_index + 4 < multi_index_end) { + while (multi_index + 4 < multi_index_end && (j < count - 4) ) { *multi_index = j; multi_index += data[0] != 0; *multi_index = j + 1; @@ -2929,7 +2929,7 @@ PyArray_Nonzero(PyArrayObject *self) j += 4; } - while (multi_index < multi_index_end) { + while (multi_index < multi_index_end && (j < count) ) { *multi_index = j; multi_index += *data != 0; data += stride; From de288cb24e1f266a7f08c81b832117e49e70b81c Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Thu, 20 Feb 2025 19:51:16 +0100 Subject: [PATCH 113/187] fix test --- numpy/_core/tests/test_multithreading.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 013690c8d101..57f940804c70 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -281,7 +281,6 @@ def func(seed): x[::2] = np.random.randint(2) try: r = np.nonzero(x) - assert r[0].min() >= 0 except RuntimeError as ex: assert 'number of non-zero array elements changed during function execution' in str(ex) @@ -296,7 +295,6 @@ def func(seed): x[::2] = np.random.randint(2) try: r = np.nonzero(x) - assert r[0].min() >= 0 except RuntimeError as ex: assert 'number of non-zero array elements changed during function execution' in str(ex) @@ -311,7 +309,6 @@ def func(seed): x[::2] = np.random.randint(2) try: r = np.nonzero(x) - assert r[0].min() >= 0 except 
RuntimeError as ex: assert 'number of non-zero array elements changed during function execution' in str(ex) From 96cc64d997c0da365fe3960c149c8592fd815a0c Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Thu, 20 Feb 2025 20:32:13 +0100 Subject: [PATCH 114/187] parameterize tests --- numpy/_core/tests/test_multithreading.py | 37 ++++-------------------- 1 file changed, 5 insertions(+), 32 deletions(-) diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 57f940804c70..ee8e3f86eaec 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -272,45 +272,18 @@ def closure(b): # bug. Better to skip on some platforms than add a useless test. pytest.skip("Couldn't spawn enough threads to run the test") -def test_nonzero_bool(): +@pytest.mark.parametrize("dtype", [bool, int, float]) +def test_nonzero_bool(dtype): # np.nonzero uses np.count_nonzero to determine the size of the output array # In a second pass the indices of the non-zero elements are determined, but they can have changed - x = np.random.randint(4, size=10_000).astype(bool) + x = np.random.randint(4, size=10_000).astype(dtype) def func(seed): x[::2] = np.random.randint(2) try: - r = np.nonzero(x) + _ = np.nonzero(x) except RuntimeError as ex: assert 'number of non-zero array elements changed during function execution' in str(ex) - run_threaded(func, max_workers=10, pass_count=True, outer_iterations=10) - -def test_nonzero_int(): - # np.nonzero uses np.count_nonzero to determine the size of the output array - # In a second pass the indices of the non-zero elements are determined, but they can have changed - x = np.random.randint(4, size=10_000).astype(int) - - def func(seed): - x[::2] = np.random.randint(2) - try: - r = np.nonzero(x) - except RuntimeError as ex: - assert 'number of non-zero array elements changed during function execution' in str(ex) - - run_threaded(func, max_workers=10, pass_count=True, 
outer_iterations=10) - -def test_nonzero_float(): - # np.nonzero uses np.count_nonzero to determine the size of the output array - # In a second pass the indices of the non-zero elements are determined, but they can have changed - x = np.random.randint(4, size=10_000).astype(float) - - def func(seed): - x[::2] = np.random.randint(2) - try: - r = np.nonzero(x) - except RuntimeError as ex: - assert 'number of non-zero array elements changed during function execution' in str(ex) - - run_threaded(func, max_workers=10, pass_count=True, outer_iterations=10) + run_threaded(func, max_workers=10, pass_count=True, outer_iterations=50) From fa697ad64717749bcaad2df4edc133d19dfd1df5 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 21 Feb 2025 10:18:26 -0700 Subject: [PATCH 115/187] MAINT: turn off halt_on_error on sanitizer CI --- .github/workflows/compiler_sanitizers.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index 9477e0be1bd1..78a923ff98e5 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -68,7 +68,7 @@ jobs: - name: Test run: | # pass -s to pytest to see ASAN errors and warnings, otherwise pytest captures them - ASAN_OPTIONS=detect_leaks=0:symbolize=1:strict_init_order=true:allocator_may_return_null=1:halt_on_error=1 \ + ASAN_OPTIONS=detect_leaks=0:symbolize=1:strict_init_order=true:allocator_may_return_null=1 \ python -m spin test -- -v -s --timeout=600 --durations=10 clang_TSAN: @@ -121,7 +121,7 @@ jobs: - name: Test run: | # These tests are slow, so only run tests in files that do "import threading" to make them count - TSAN_OPTIONS=allocator_may_return_null=1:halt_on_error=1 \ + TSAN_OPTIONS=allocator_may_return_null=1 \ python -m spin test \ `find numpy -name "test*.py" | xargs grep -l "import threading" | tr '\n' ' '` \ -- -v -s --timeout=600 --durations=10 From 
4c9da471fb3da558d7f51d5e8a43c4c5da191ff8 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Fri, 21 Feb 2025 21:05:37 +0100 Subject: [PATCH 116/187] attampt to add TSAN suppressions --- .github/workflows/compiler_sanitizers.yml | 2 +- tools/ci/tsan_suppressions.txt | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 tools/ci/tsan_suppressions.txt diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index 78a923ff98e5..7a5745228b7d 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -121,7 +121,7 @@ jobs: - name: Test run: | # These tests are slow, so only run tests in files that do "import threading" to make them count - TSAN_OPTIONS=allocator_may_return_null=1 \ + TSAN_OPTIONS="allocator_may_return_null=1:halt_on_error=1:suppressions=tools\ci\tsan_suppressions.txt" \ python -m spin test \ `find numpy -name "test*.py" | xargs grep -l "import threading" | tr '\n' ' '` \ -- -v -s --timeout=600 --durations=10 diff --git a/tools/ci/tsan_suppressions.txt b/tools/ci/tsan_suppressions.txt new file mode 100644 index 000000000000..3dc9168f6b5d --- /dev/null +++ b/tools/ci/tsan_suppressions.txt @@ -0,0 +1,9 @@ +# This file contains suppressions for the TSAN tool +# +# Reference: https://github.com/google/sanitizers/wiki/ThreadSanitizerSuppressions + + +# These warnings trigger directly in a CPython function. 
+ +# For np.nonzero, see gh-28361 +race:lowlevel_strided_loops.c.src From 2c9afc69d50a01d1d8ab8829601aedea68b54232 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Fri, 21 Feb 2025 22:37:31 +0100 Subject: [PATCH 117/187] try to suppress --- tools/ci/tsan_suppressions.txt | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/ci/tsan_suppressions.txt b/tools/ci/tsan_suppressions.txt index 3dc9168f6b5d..b74671231e27 100644 --- a/tools/ci/tsan_suppressions.txt +++ b/tools/ci/tsan_suppressions.txt @@ -7,3 +7,8 @@ # For np.nonzero, see gh-28361 race:lowlevel_strided_loops.c.src +race:count_nonzero_int +race:count_nonzero_bool +race:count_nonzero_float + + From ef3c7d08a64fd5be559859818f319739c2144c52 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Sat, 22 Feb 2025 14:07:49 +0100 Subject: [PATCH 118/187] review comments --- .github/workflows/compiler_sanitizers.yml | 2 +- numpy/_core/src/multiarray/item_selection.c | 3 +-- numpy/_core/tests/test_multithreading.py | 9 +++++++-- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index 7a5745228b7d..cc9659696b2d 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -121,7 +121,7 @@ jobs: - name: Test run: | # These tests are slow, so only run tests in files that do "import threading" to make them count - TSAN_OPTIONS="allocator_may_return_null=1:halt_on_error=1:suppressions=tools\ci\tsan_suppressions.txt" \ + TSAN_OPTIONS="allocator_may_return_null=1:suppressions=/Users/runner/work/numpy/numpy/tools/ci/tsan_suppressions.txt" \ python -m spin test \ `find numpy -name "test*.py" | xargs grep -l "import threading" | tr '\n' ' '` \ -- -v -s --timeout=600 --durations=10 diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index fcd78a28b365..6e9b03a41343 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ 
b/numpy/_core/src/multiarray/item_selection.c @@ -2893,10 +2893,10 @@ PyArray_Nonzero(PyArrayObject *self) * the fast bool count is followed by this sparse path is faster * than combining the two loops, even for larger arrays */ + npy_intp * multi_index_end = multi_index + nonzero_count; if (((double)nonzero_count / count) <= 0.1) { npy_intp subsize; npy_intp j = 0; - npy_intp * multi_index_end = multi_index + nonzero_count; while (multi_index < multi_index_end) { npy_memchr(data + j * stride, 0, stride, count - j, &subsize, 1); @@ -2912,7 +2912,6 @@ PyArray_Nonzero(PyArrayObject *self) * stalls that are very expensive on most modern processors. */ else { - npy_intp *multi_index_end = multi_index + nonzero_count; npy_intp j = 0; /* Manually unroll for GCC and maybe other compilers */ diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index ee8e3f86eaec..677245ff902a 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -274,16 +274,21 @@ def closure(b): @pytest.mark.parametrize("dtype", [bool, int, float]) def test_nonzero_bool(dtype): + # See: gh-28361 + # # np.nonzero uses np.count_nonzero to determine the size of the output array # In a second pass the indices of the non-zero elements are determined, but they can have changed + # + # This test triggers a data race which is suppressed in the TSAN CI. 
The test is to ensure + # np.nonzero does not generate a segmentation fault x = np.random.randint(4, size=10_000).astype(dtype) - def func(seed): + def func(): x[::2] = np.random.randint(2) try: _ = np.nonzero(x) except RuntimeError as ex: assert 'number of non-zero array elements changed during function execution' in str(ex) - run_threaded(func, max_workers=10, pass_count=True, outer_iterations=50) + run_threaded(func, max_workers=10, pass_count=False, outer_iterations=50) From 336a2c3012d6fb8497f522339edb99dc5051a279 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Sat, 22 Feb 2025 15:06:04 +0100 Subject: [PATCH 119/187] more suppressions --- tools/ci/tsan_suppressions.txt | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/ci/tsan_suppressions.txt b/tools/ci/tsan_suppressions.txt index b74671231e27..6360cf716d00 100644 --- a/tools/ci/tsan_suppressions.txt +++ b/tools/ci/tsan_suppressions.txt @@ -6,9 +6,10 @@ # These warnings trigger directly in a CPython function. 
# For np.nonzero, see gh-28361 -race:lowlevel_strided_loops.c.src +#race:lowlevel_strided_loops.c.src +race:PyArray_Nonzero race:count_nonzero_int race:count_nonzero_bool race:count_nonzero_float - +race:DOUBLE_nonzero From 823b3f01bbc29e70304e80c69f3db86700533d6c Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Sat, 22 Feb 2025 21:10:51 +0100 Subject: [PATCH 120/187] avoid race in writing --- .github/workflows/compiler_sanitizers.yml | 3 +++ numpy/_core/tests/test_multithreading.py | 19 +++++++++++-------- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index cc9659696b2d..7f5329da89a2 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -120,6 +120,9 @@ jobs: python -m spin build -j2 -- -Db_sanitize=thread - name: Test run: | + # try to figure out root folder for source + echo $HOME + set # These tests are slow, so only run tests in files that do "import threading" to make them count TSAN_OPTIONS="allocator_may_return_null=1:suppressions=/Users/runner/work/numpy/numpy/tools/ci/tsan_suppressions.txt" \ python -m spin test \ diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index 677245ff902a..b3bbbe403577 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -273,7 +273,7 @@ def closure(b): pytest.skip("Couldn't spawn enough threads to run the test") @pytest.mark.parametrize("dtype", [bool, int, float]) -def test_nonzero_bool(dtype): +def test_nonzero(dtype): # See: gh-28361 # # np.nonzero uses np.count_nonzero to determine the size of the output array @@ -283,12 +283,15 @@ def test_nonzero_bool(dtype): # np.nonzero does not generate a segmentation fault x = np.random.randint(4, size=10_000).astype(dtype) - def func(): - x[::2] = np.random.randint(2) - try: - _ = np.nonzero(x) - except RuntimeError as ex: - assert 
'number of non-zero array elements changed during function execution' in str(ex) + def func(index): + for _ in range(10): + if index == 0: + x[::2] = np.random.randint(2) + else: + try: + _ = np.nonzero(x) + except RuntimeError as ex: + assert 'number of non-zero array elements changed during function execution' in str(ex) - run_threaded(func, max_workers=10, pass_count=False, outer_iterations=50) + run_threaded(func, max_workers=10, pass_count=True, outer_iterations=50) From 448da118337a6af507dd66a0e423b48f2327ab82 Mon Sep 17 00:00:00 2001 From: Pieter Eendebak Date: Sat, 22 Feb 2025 21:35:00 +0100 Subject: [PATCH 121/187] cleanup --- .github/workflows/compiler_sanitizers.yml | 5 +---- tools/ci/tsan_suppressions.txt | 4 ---- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/.github/workflows/compiler_sanitizers.yml b/.github/workflows/compiler_sanitizers.yml index 7f5329da89a2..09e71051556d 100644 --- a/.github/workflows/compiler_sanitizers.yml +++ b/.github/workflows/compiler_sanitizers.yml @@ -120,11 +120,8 @@ jobs: python -m spin build -j2 -- -Db_sanitize=thread - name: Test run: | - # try to figure out root folder for source - echo $HOME - set # These tests are slow, so only run tests in files that do "import threading" to make them count - TSAN_OPTIONS="allocator_may_return_null=1:suppressions=/Users/runner/work/numpy/numpy/tools/ci/tsan_suppressions.txt" \ + TSAN_OPTIONS="allocator_may_return_null=1:suppressions=$GITHUB_WORKSPACE/tools/ci/tsan_suppressions.txt" \ python -m spin test \ `find numpy -name "test*.py" | xargs grep -l "import threading" | tr '\n' ' '` \ -- -v -s --timeout=600 --durations=10 diff --git a/tools/ci/tsan_suppressions.txt b/tools/ci/tsan_suppressions.txt index 6360cf716d00..0745debd8e5f 100644 --- a/tools/ci/tsan_suppressions.txt +++ b/tools/ci/tsan_suppressions.txt @@ -2,11 +2,7 @@ # # Reference: https://github.com/google/sanitizers/wiki/ThreadSanitizerSuppressions - -# These warnings trigger directly in a CPython function. 
- # For np.nonzero, see gh-28361 -#race:lowlevel_strided_loops.c.src race:PyArray_Nonzero race:count_nonzero_int race:count_nonzero_bool From 1efec00b4db1ff49b6ae16f99abd5c237d1c200b Mon Sep 17 00:00:00 2001 From: Tyler Reddy Date: Mon, 3 Mar 2025 07:31:30 -0700 Subject: [PATCH 122/187] BUG: safer bincount casting (#28355) * BUG: safer bincount casting * Fixes #28354 * Force usage of `npy_intp` type in `np.bincount()` and avoid unsafe casting errors with i.e., `npy_uint64`. This is similar to our behavior with indexing. * MAINT, BUG: PR 28355 revisions * `arr_bincount()` now only uses unsafe casting for integer input types, and the number of casting operations has been reduced for the code path used in above PR. * a test for non-crashing behavior with non-contiguous `bincount()` input has been added. * MAINT, BUG: PR 28355 revisions * Based on reviewer feedback, narrow the scope of the `flags` variable in `arr_bincount()`. * Based on reviewer feedback, add an array-like test for the `uint64` casting issue, which indeed fails before and passes after adding a similar shim to the array-like code path in `arr_bincount()`. * Based on reviewer feedback, switch the patching from `PyArray_Size` to `PyArray_Check` in a few places. * Update numpy/_core/src/multiarray/compiled_base.c --------- Co-authored-by: Sebastian Berg --- numpy/_core/src/multiarray/compiled_base.c | 16 +++++++++++++--- numpy/lib/tests/test_function_base.py | 21 +++++++++++++++++++++ 2 files changed, 34 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index 48524aff4dac..fca733597a2d 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -150,8 +150,12 @@ arr_bincount(PyObject *NPY_UNUSED(self), PyObject *const *args, } if (PyArray_SIZE(tmp1) > 0) { /* The input is not empty, so convert it to NPY_INTP. 
*/ - lst = (PyArrayObject *)PyArray_ContiguousFromAny((PyObject *)tmp1, - NPY_INTP, 1, 1); + int flags = NPY_ARRAY_WRITEABLE | NPY_ARRAY_ALIGNED | NPY_ARRAY_C_CONTIGUOUS; + if (PyArray_ISINTEGER(tmp1)) { + flags = flags | NPY_ARRAY_FORCECAST; + } + PyArray_Descr* local_dtype = PyArray_DescrFromType(NPY_INTP); + lst = (PyArrayObject *)PyArray_FromAny((PyObject *)tmp1, local_dtype, 1, 1, flags, NULL); Py_DECREF(tmp1); if (lst == NULL) { /* Failed converting to NPY_INTP. */ @@ -177,7 +181,13 @@ arr_bincount(PyObject *NPY_UNUSED(self), PyObject *const *args, } if (lst == NULL) { - lst = (PyArrayObject *)PyArray_ContiguousFromAny(list, NPY_INTP, 1, 1); + int flags = NPY_ARRAY_WRITEABLE | NPY_ARRAY_ALIGNED | NPY_ARRAY_C_CONTIGUOUS; + if (PyArray_Check((PyObject *)list) && + PyArray_ISINTEGER((PyArrayObject *)list)) { + flags = flags | NPY_ARRAY_FORCECAST; + } + PyArray_Descr* local_dtype = PyArray_DescrFromType(NPY_INTP); + lst = (PyArrayObject *)PyArray_FromAny(list, local_dtype, 1, 1, flags, NULL); if (lst == NULL) { goto fail; } diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index ed59a4a86181..c97ef92a7889 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2925,6 +2925,27 @@ def test_error_not_1d(self, vals): with assert_raises(ValueError): np.bincount(vals) + @pytest.mark.parametrize("dt", np.typecodes["AllInteger"]) + def test_gh_28354(self, dt): + a = np.array([0, 1, 1, 3, 2, 1, 7], dtype=dt) + actual = np.bincount(a) + expected = [1, 3, 1, 1, 0, 0, 0, 1] + assert_array_equal(actual, expected) + + def test_contiguous_handling(self): + # check for absence of hard crash + np.bincount(np.arange(10000)[::2]) + + def test_gh_28354_array_like(self): + class A: + def __array__(self): + return np.array([0, 1, 1, 3, 2, 1, 7], dtype=np.uint64) + + a = A() + actual = np.bincount(a) + expected = [1, 3, 1, 1, 0, 0, 0, 1] + assert_array_equal(actual, expected) + class TestInterp: From 
ac7e1a11f0be590753f1c36077bd142d5f74c496 Mon Sep 17 00:00:00 2001 From: Jonathan Albrecht Date: Fri, 14 Feb 2025 15:50:16 -0500 Subject: [PATCH 123/187] BUG: Fix building on s390x with clang clang on s390x did not have implementations of vector logical operators such as vec_and, vec_or and vec_xor in vecintrin.h until __VEC__ == 10305 which caused compile errors. Add implementations to allow the build to complete. Currently, clang >= 19 is required for all tests to pass because that is the minimum version supported by highway on s390x with clang. --- numpy/_core/src/common/simd/vec/operators.h | 25 ++++++++++++++++----- 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/numpy/_core/src/common/simd/vec/operators.h b/numpy/_core/src/common/simd/vec/operators.h index 50dac20f6d7d..3a402689d02f 100644 --- a/numpy/_core/src/common/simd/vec/operators.h +++ b/numpy/_core/src/common/simd/vec/operators.h @@ -44,6 +44,10 @@ /*************************** * Logical ***************************/ +#define NPYV_IMPL_VEC_BIN_WRAP(INTRIN, SFX) \ + NPY_FINLINE npyv_##SFX npyv_##INTRIN##_##SFX(npyv_##SFX a, npyv_##SFX b) \ + { return vec_##INTRIN(a, b); } + #define NPYV_IMPL_VEC_BIN_CAST(INTRIN, SFX, CAST) \ NPY_FINLINE npyv_##SFX npyv_##INTRIN##_##SFX(npyv_##SFX a, npyv_##SFX b) \ { return (npyv_##SFX)vec_##INTRIN((CAST)a, (CAST)b); } @@ -54,6 +58,15 @@ #else #define NPYV_IMPL_VEC_BIN_B64(INTRIN) NPYV_IMPL_VEC_BIN_CAST(INTRIN, b64, npyv_b64) #endif + +// Up to clang __VEC__ 10305 logical intrinsics do not support f32 or f64 +#if defined(NPY_HAVE_VX) && defined(__clang__) && __VEC__ < 10305 + #define NPYV_IMPL_VEC_BIN_F32(INTRIN) NPYV_IMPL_VEC_BIN_CAST(INTRIN, f32, npyv_u32) + #define NPYV_IMPL_VEC_BIN_F64(INTRIN) NPYV_IMPL_VEC_BIN_CAST(INTRIN, f64, npyv_u64) +#else + #define NPYV_IMPL_VEC_BIN_F32(INTRIN) NPYV_IMPL_VEC_BIN_WRAP(INTRIN, f32) + #define NPYV_IMPL_VEC_BIN_F64(INTRIN) NPYV_IMPL_VEC_BIN_WRAP(INTRIN, f64) +#endif // AND #define npyv_and_u8 vec_and #define 
npyv_and_s8 vec_and @@ -64,9 +77,9 @@ #define npyv_and_u64 vec_and #define npyv_and_s64 vec_and #if NPY_SIMD_F32 - #define npyv_and_f32 vec_and + NPYV_IMPL_VEC_BIN_F32(and) #endif -#define npyv_and_f64 vec_and +NPYV_IMPL_VEC_BIN_F64(and) #define npyv_and_b8 vec_and #define npyv_and_b16 vec_and #define npyv_and_b32 vec_and @@ -82,9 +95,9 @@ NPYV_IMPL_VEC_BIN_B64(and) #define npyv_or_u64 vec_or #define npyv_or_s64 vec_or #if NPY_SIMD_F32 - #define npyv_or_f32 vec_or + NPYV_IMPL_VEC_BIN_F32(or) #endif -#define npyv_or_f64 vec_or +NPYV_IMPL_VEC_BIN_F64(or) #define npyv_or_b8 vec_or #define npyv_or_b16 vec_or #define npyv_or_b32 vec_or @@ -100,9 +113,9 @@ NPYV_IMPL_VEC_BIN_B64(or) #define npyv_xor_u64 vec_xor #define npyv_xor_s64 vec_xor #if NPY_SIMD_F32 - #define npyv_xor_f32 vec_xor + NPYV_IMPL_VEC_BIN_F32(xor) #endif -#define npyv_xor_f64 vec_xor +NPYV_IMPL_VEC_BIN_F64(xor) #define npyv_xor_b8 vec_xor #define npyv_xor_b16 vec_xor #define npyv_xor_b32 vec_xor From 8f78bcfe7d8c0e3253ffc7291acd1c6295c44320 Mon Sep 17 00:00:00 2001 From: mayeut Date: Sun, 2 Mar 2025 08:20:36 +0100 Subject: [PATCH 124/187] CI: use QEMU 9.2.2 for Linux Qemu tests Following a kernel update on GHA runners, QEMU tests are failing randomly. Update the version of QEMU used in order to fix the random segfauls. --- .github/workflows/linux_qemu.yml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index d773152bb1bb..5ac7cea857a7 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -14,6 +14,7 @@ on: branches: - main - maintenance/** + workflow_dispatch: defaults: run: @@ -28,8 +29,9 @@ permissions: jobs: linux_qemu: - # To enable this workflow on a fork, comment out: - if: github.repository == 'numpy/numpy' + # Only workflow_dispatch is enabled on forks. 
+ # To enable this job and subsequent jobs on a fork for other events, comment out: + if: github.repository == 'numpy/numpy' || github.event_name == 'workflow_dispatch' runs-on: ubuntu-22.04 continue-on-error: true strategy: @@ -107,7 +109,7 @@ jobs: - name: Initialize binfmt_misc for qemu-user-static run: | - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes + docker run --rm --privileged tonistiigi/binfmt:qemu-v9.2.2-52 --install all - name: Install GCC cross-compilers run: | @@ -176,4 +178,3 @@ jobs: cd /numpy && spin test -- -k \"${RUNTIME_TEST_FILTER}\" '" - From a73839bdfa953c013e6d2781153ad0dfcdd0202f Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Sun, 2 Mar 2025 19:48:44 +1100 Subject: [PATCH 125/187] Update .github/workflows/linux_qemu.yml --- .github/workflows/linux_qemu.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 5ac7cea857a7..dd0b5778c2c7 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -109,6 +109,7 @@ jobs: - name: Initialize binfmt_misc for qemu-user-static run: | + # see https://hub.docker.com/r/tonistiigi/binfmt for available versions docker run --rm --privileged tonistiigi/binfmt:qemu-v9.2.2-52 --install all - name: Install GCC cross-compilers From eb85757c5005fff1fca883e52252623d1a364509 Mon Sep 17 00:00:00 2001 From: Andrew Nelson Date: Sun, 2 Mar 2025 19:49:15 +1100 Subject: [PATCH 126/187] Update linux_qemu.yml --- .github/workflows/linux_qemu.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index dd0b5778c2c7..15681f4c476f 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -109,7 +109,7 @@ jobs: - name: Initialize binfmt_misc for qemu-user-static run: | - # see https://hub.docker.com/r/tonistiigi/binfmt for available versions + # see 
https://hub.docker.com/r/tonistiigi/binfmt for available versions docker run --rm --privileged tonistiigi/binfmt:qemu-v9.2.2-52 --install all - name: Install GCC cross-compilers From 975443d05bb98d46267ae63d53e95e4bbb0e006b Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 3 Mar 2025 11:53:31 -0700 Subject: [PATCH 127/187] TST: add new IS_64BIT constant for testing --- numpy/_core/tests/test_array_coercion.py | 6 +++--- numpy/_core/tests/test_multiarray.py | 4 ++-- numpy/_core/tests/test_regression.py | 5 +++-- numpy/f2py/tests/test_return_real.py | 5 ++--- numpy/f2py/tests/test_semicolon_split.py | 9 ++++----- numpy/lib/tests/test_format.py | 5 ++--- numpy/testing/_private/utils.py | 3 ++- 7 files changed, 18 insertions(+), 19 deletions(-) diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py index c7ceb92650c9..55c5005149c1 100644 --- a/numpy/_core/tests/test_array_coercion.py +++ b/numpy/_core/tests/test_array_coercion.py @@ -14,7 +14,8 @@ from numpy._core._rational_tests import rational from numpy.testing import ( - assert_array_equal, assert_warns, IS_PYPY) + assert_array_equal, assert_warns, IS_PYPY, IS_64BIT +) def arraylikes(): @@ -716,8 +717,7 @@ def __array__(self, dtype=None, copy=None): arr = np.array([ArrayLike]) assert arr[0] is ArrayLike - @pytest.mark.skipif( - np.dtype(np.intp).itemsize < 8, reason="Needs 64bit platform") + @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") def test_too_large_array_error_paths(self): """Test the error paths, including for memory leaks""" arr = np.array(0, dtype="uint8") diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 7ac22869495f..cc9fc9b19ac8 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -30,7 +30,7 @@ assert_array_equal, assert_raises_regex, assert_array_almost_equal, assert_allclose, IS_PYPY, IS_WASM, IS_PYSTON, HAS_REFCOUNT, assert_array_less, 
runstring, temppath, suppress_warnings, break_cycles, - check_support_sve, assert_array_compare, + check_support_sve, assert_array_compare, IS_64BIT ) from numpy.testing._private.utils import requires_memory, _no_tracing from numpy._core.tests._locales import CommaDecimalPointLocale @@ -983,7 +983,7 @@ def test_too_big_error(self): assert_raises(ValueError, np.zeros, shape, dtype=np.int8) assert_raises(ValueError, np.ones, shape, dtype=np.int8) - @pytest.mark.skipif(np.dtype(np.intp).itemsize != 8, + @pytest.mark.skipif(not IS_64BIT, reason="malloc may not fail on 32 bit systems") def test_malloc_fails(self): # This test is guaranteed to fail due to a too large allocation diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index c4a0a55227a0..13b4ad16d592 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -14,7 +14,8 @@ assert_, assert_equal, IS_PYPY, assert_almost_equal, assert_array_equal, assert_array_almost_equal, assert_raises, assert_raises_regex, assert_warns, suppress_warnings, - _assert_valid_refcount, HAS_REFCOUNT, IS_PYSTON, IS_WASM + _assert_valid_refcount, HAS_REFCOUNT, IS_PYSTON, IS_WASM, + IS_64BIT, ) from numpy.testing._private.utils import _no_tracing, requires_memory from numpy._utils import asbytes, asunicode @@ -2265,7 +2266,7 @@ def test_void_compare_segfault(self): def test_reshape_size_overflow(self): # gh-7455 a = np.ones(20)[::2] - if np.dtype(np.intp).itemsize == 8: + if IS_64BIT: # 64 bit. The following are the prime factors of 2**63 + 5, # plus a leading 2, so when multiplied together as int64, # the result overflows to a total size of 10. 
diff --git a/numpy/f2py/tests/test_return_real.py b/numpy/f2py/tests/test_return_real.py index d9b316dcc45d..25b638890a96 100644 --- a/numpy/f2py/tests/test_return_real.py +++ b/numpy/f2py/tests/test_return_real.py @@ -1,8 +1,8 @@ import platform import pytest -import numpy as np from numpy import array +from numpy.testing import IS_64BIT from . import util @@ -53,8 +53,7 @@ def check_function(self, t, tname): "but not when run in isolation", ) @pytest.mark.skipif( - np.dtype(np.intp).itemsize < 8, - reason="32-bit builds are buggy" + not IS_64BIT, reason="32-bit builds are buggy" ) class TestCReturnReal(TestReturnReal): suffix = ".pyf" diff --git a/numpy/f2py/tests/test_semicolon_split.py b/numpy/f2py/tests/test_semicolon_split.py index ab9c093dbb82..8a9eb8743501 100644 --- a/numpy/f2py/tests/test_semicolon_split.py +++ b/numpy/f2py/tests/test_semicolon_split.py @@ -1,6 +1,7 @@ import platform import pytest -import numpy as np + +from numpy.testing import IS_64BIT from . import util @@ -11,8 +12,7 @@ "but not when run in isolation", ) @pytest.mark.skipif( - np.dtype(np.intp).itemsize < 8, - reason="32-bit builds are buggy" + not IS_64BIT, reason="32-bit builds are buggy" ) class TestMultiline(util.F2PyTest): suffix = ".pyf" @@ -44,8 +44,7 @@ def test_multiline(self): "but not when run in isolation", ) @pytest.mark.skipif( - np.dtype(np.intp).itemsize < 8, - reason="32-bit builds are buggy" + not IS_64BIT, reason="32-bit builds are buggy" ) @pytest.mark.slow class TestCallstatement(util.F2PyTest): diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index f237dffbc244..0cac8819f5fd 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -283,7 +283,7 @@ import numpy as np from numpy.testing import ( assert_, assert_array_equal, assert_raises, assert_raises_regex, - assert_warns, IS_PYPY, IS_WASM + assert_warns, IS_PYPY, IS_WASM, IS_64BIT ) from numpy.testing._private.utils import requires_memory from numpy.lib 
import format @@ -927,8 +927,7 @@ def test_large_file_support(tmpdir): @pytest.mark.skipif(IS_PYPY, reason="flaky on PyPy") -@pytest.mark.skipif(np.dtype(np.intp).itemsize < 8, - reason="test requires 64-bit system") +@pytest.mark.skipif(not IS_64BIT, reason="test requires 64-bit system") @pytest.mark.slow @requires_memory(free_bytes=2 * 2**30) def test_large_archive(tmpdir): diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 01fe6327713c..42e43e21f37b 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -46,7 +46,7 @@ 'HAS_REFCOUNT', "IS_WASM", 'suppress_warnings', 'assert_array_compare', 'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON', 'IS_MUSL', 'check_support_sve', 'NOGIL_BUILD', - 'IS_EDITABLE', 'IS_INSTALLED', 'NUMPY_ROOT', 'run_threaded', + 'IS_EDITABLE', 'IS_INSTALLED', 'NUMPY_ROOT', 'run_threaded', 'IS_64BIT', ] @@ -105,6 +105,7 @@ class KnownFailureException(Exception): IS_MUSL = True NOGIL_BUILD = bool(sysconfig.get_config_var("Py_GIL_DISABLED")) +IS_64BIT = np.dtype(np.intp).itemsize == 8 def assert_(val, msg=''): """ From 3b6288cc9f364078606c85a4ccbc008b9e35273e Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 3 Mar 2025 11:53:54 -0700 Subject: [PATCH 128/187] BUG: skip legacy dtype multithreaded test on 32 bit runners [wheel build] --- numpy/_core/tests/test_multithreading.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/numpy/_core/tests/test_multithreading.py b/numpy/_core/tests/test_multithreading.py index b3bbbe403577..0f7e01aef033 100644 --- a/numpy/_core/tests/test_multithreading.py +++ b/numpy/_core/tests/test_multithreading.py @@ -5,7 +5,7 @@ import numpy as np import pytest -from numpy.testing import IS_WASM +from numpy.testing import IS_WASM, IS_64BIT from numpy.testing._private.utils import run_threaded from numpy._core import _rational_tests @@ -257,20 +257,16 @@ def func(arr): f.result() 
+@pytest.mark.skipif( + not IS_64BIT, + reason="Sometimes causes failures or crashes due to OOM on 32 bit runners" +) def test_legacy_usertype_cast_init_thread_safety(): def closure(b): b.wait() np.full((10, 10), 1, _rational_tests.rational) - try: - run_threaded(closure, 250, pass_barrier=True) - except RuntimeError: - # The 32 bit linux runner will trigger this with 250 threads. I can - # trigger it on my Linux laptop with 500 threads but the CI runner is - # more resource-constrained. - # Reducing the number of threads means the test doesn't trigger the - # bug. Better to skip on some platforms than add a useless test. - pytest.skip("Couldn't spawn enough threads to run the test") + run_threaded(closure, 250, pass_barrier=True) @pytest.mark.parametrize("dtype", [bool, int, float]) def test_nonzero(dtype): From 2966a6d745b63e25222dcf55d3e1ffda89f1bd15 Mon Sep 17 00:00:00 2001 From: Sebastian Berg Date: Wed, 5 Mar 2025 21:48:44 +0100 Subject: [PATCH 129/187] BUG: Fix searchsorted and CheckFromAny byte-swapping logic * BUG: Fix searchsorted and CheckFromAny byte-swapping logic This closes gh-28190 and fixes another issue in the initial code that triggered the regression. Note that we may still want to avoid this, since this does lead to constructing (view compatible) structured dtypes unnecessarily here. It would also compactify the dtype. For building unnecessary dtypes, the better solution may be to just introduce a "canonical" flag to the dtypes (now that we have the space). 
* STY: Adopt code comment suggestions --- numpy/_core/src/multiarray/ctors.c | 18 ++++++++---------- numpy/_core/src/multiarray/item_selection.c | 18 +++++++----------- numpy/_core/tests/test_regression.py | 6 ++++++ 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index b6a935e419a6..0eeb8e61f153 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -1821,32 +1821,30 @@ PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, * Internal version of PyArray_CheckFromAny that accepts a dtypemeta. Borrows * references to the descriptor and dtype. */ - NPY_NO_EXPORT PyObject * PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr, PyArray_DTypeMeta *in_DType, int min_depth, int max_depth, int requires, PyObject *context) { PyObject *obj; + Py_XINCREF(in_descr); /* take ownership as we may replace it */ if (requires & NPY_ARRAY_NOTSWAPPED) { - if (!in_descr && PyArray_Check(op) && - PyArray_ISBYTESWAPPED((PyArrayObject* )op)) { - in_descr = PyArray_DescrNew(PyArray_DESCR((PyArrayObject *)op)); + if (!in_descr && PyArray_Check(op)) { + in_descr = PyArray_DESCR((PyArrayObject *)op); + Py_INCREF(in_descr); + } + if (in_descr) { + PyArray_DESCR_REPLACE_CANONICAL(in_descr); if (in_descr == NULL) { return NULL; } } - else if (in_descr && !PyArray_ISNBO(in_descr->byteorder)) { - PyArray_DESCR_REPLACE(in_descr); - } - if (in_descr && in_descr->byteorder != NPY_IGNORE && in_descr->byteorder != NPY_NATIVE) { - in_descr->byteorder = NPY_NATIVE; - } } int was_scalar; obj = PyArray_FromAny_int(op, in_descr, in_DType, min_depth, max_depth, requires, context, &was_scalar); + Py_XDECREF(in_descr); if (obj == NULL) { return NULL; } diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index 6e9b03a41343..4549f107d76e 100644 --- a/numpy/_core/src/multiarray/item_selection.c +++ 
b/numpy/_core/src/multiarray/item_selection.c @@ -2111,7 +2111,6 @@ PyArray_SearchSorted(PyArrayObject *op1, PyObject *op2, if (dtype == NULL) { return NULL; } - /* refs to dtype we own = 1 */ /* Look for binary search function */ if (perm) { @@ -2122,26 +2121,23 @@ PyArray_SearchSorted(PyArrayObject *op1, PyObject *op2, } if (binsearch == NULL && argbinsearch == NULL) { PyErr_SetString(PyExc_TypeError, "compare not supported for type"); - /* refs to dtype we own = 1 */ Py_DECREF(dtype); - /* refs to dtype we own = 0 */ return NULL; } - /* need ap2 as contiguous array and of right type */ - /* refs to dtype we own = 1 */ - Py_INCREF(dtype); - /* refs to dtype we own = 2 */ + /* need ap2 as contiguous array and of right dtype (note: steals dtype reference) */ ap2 = (PyArrayObject *)PyArray_CheckFromAny(op2, dtype, 0, 0, NPY_ARRAY_CARRAY_RO | NPY_ARRAY_NOTSWAPPED, NULL); - /* refs to dtype we own = 1, array creation steals one even on failure */ if (ap2 == NULL) { - Py_DECREF(dtype); - /* refs to dtype we own = 0 */ return NULL; } + /* + * The dtype reference we had was used for creating ap2, which may have + * replaced it with another. So here we copy the dtype of ap2 and use it for `ap1`. 
+ */ + dtype = (PyArray_Descr *)Py_NewRef(PyArray_DESCR(ap2)); /* * If the needle (ap2) is larger than the haystack (op1) we copy the @@ -2150,9 +2146,9 @@ PyArray_SearchSorted(PyArrayObject *op1, PyObject *op2, if (PyArray_SIZE(ap2) > PyArray_SIZE(op1)) { ap1_flags |= NPY_ARRAY_CARRAY_RO; } + /* dtype is stolen, after this we have no reference */ ap1 = (PyArrayObject *)PyArray_CheckFromAny((PyObject *)op1, dtype, 1, 1, ap1_flags, NULL); - /* refs to dtype we own = 0, array creation steals one even on failure */ if (ap1 == NULL) { goto fail; } diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index 13b4ad16d592..851ce324d76c 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -2655,3 +2655,9 @@ def test_sort_overlap(self): inp = np.linspace(0, size, num=size, dtype=np.intc) out = np.sort(inp) assert_equal(inp, out) + + def test_searchsorted_structured(self): + # gh-28190 + x = np.array([(0, 1.)], dtype=[('time', ' Date: Thu, 6 Mar 2025 02:21:30 -0500 Subject: [PATCH 130/187] BUG: sanity check ``__array_interface__`` number of dimensions (#28407) ``__array_interface__`` should typically not have more dimensions than NumPy supports, but unlike other malformed interfaces, this should fail gracefully if someone were to pass more. 
--- numpy/_core/src/multiarray/ctors.c | 8 +++++++- numpy/_core/tests/test_multiarray.py | 21 +++++++++++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index 0eeb8e61f153..f4f66142101c 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -2153,7 +2153,7 @@ PyArray_FromInterface(PyObject *origin) PyArray_Descr *dtype = NULL; char *data = NULL; Py_buffer view; - int i, n; + Py_ssize_t i, n; npy_intp dims[NPY_MAXDIMS], strides[NPY_MAXDIMS]; int dataflags = NPY_ARRAY_BEHAVED; @@ -2269,6 +2269,12 @@ PyArray_FromInterface(PyObject *origin) /* Get dimensions from shape tuple */ else { n = PyTuple_GET_SIZE(attr); + if (n > NPY_MAXDIMS) { + PyErr_Format(PyExc_ValueError, + "number of dimensions must be within [0, %d], got %d", + NPY_MAXDIMS, n); + goto fail; + } for (i = 0; i < n; i++) { PyObject *tmp = PyTuple_GET_ITEM(attr, i); dims[i] = PyArray_PyIntAsIntp(tmp); diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index cc9fc9b19ac8..87508732d85c 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -10358,3 +10358,24 @@ def test_to_device(self): r"The stream argument in to_device\(\) is not supported" ): arr.to_device("cpu", stream=1) + +def test_array_interface_excess_dimensions_raises(): + """Regression test for gh-27949: ensure too many dims raises ValueError instead of segfault.""" + + # Dummy object to hold a custom __array_interface__ + class DummyArray: + def __init__(self, interface): + # Attach the array interface dict to mimic an array + self.__array_interface__ = interface + + # Create a base array (scalar) and copy its interface + base = np.array(42) # base can be any scalar or array + interface = dict(base.__array_interface__) + + # Modify the shape to exceed NumPy's dimension limit (NPY_MAXDIMS, typically 64) + interface['shape'] = tuple([1] * 
136) # match the original bug report + + dummy = DummyArray(interface) + # Now, using np.asanyarray on this dummy should trigger a ValueError (not segfault) + with pytest.raises(ValueError, match="dimensions must be within"): + np.asanyarray(dummy) \ No newline at end of file From 85f2711e17211c7003a8ea9e30dc2e9796d39c44 Mon Sep 17 00:00:00 2001 From: Mark Harfouche Date: Tue, 21 Jan 2025 14:17:17 -0500 Subject: [PATCH 131/187] MAINT: Hide decorator from pytest traceback Currently it points the user to the internal numpy function ``` kwargs = {'strict': False}, old_name = 'y', new_name = 'desired' @functools.wraps(fun) def wrapper(*args, **kwargs): for old_name, new_name in zip(old_names, new_names): if old_name in kwargs: if dep_version: end_version = dep_version.split('.') end_version[1] = str(int(end_version[1]) + 2) end_version = '.'.join(end_version) msg = (f"Use of keyword argument `{old_name}` is " f"deprecated and replaced by `{new_name}`. " f"Support for `{old_name}` will be removed " f"in NumPy {end_version}.") warnings.warn(msg, DeprecationWarning, stacklevel=2) if new_name in kwargs: msg = (f"{fun.__name__}() got multiple values for " f"argument now known as `{new_name}`") raise TypeError(msg) kwargs[new_name] = kwargs.pop(old_name) > return fun(*args, **kwargs) E AssertionError: E Arrays are not equal E E (shapes (2, 2, 200, 200), (2, 2, 800, 800) mismatch) E ACTUAL: array([[[[0, 0, 0, ..., 0, 0, 0], E [0, 0, 0, ..., 0, 0, 0], E [0, 0, 0, ..., 0, 0, 0],... E DESIRED: array([[[[0, 0, 0, ..., 0, 0, 0], E [0, 0, 0, ..., 0, 0, 0], E [0, 0, 0, ..., 0, 0, 0],... 
../../miniforge3/envs/dev/lib/python3.10/site-packages/numpy/_utils/__init__.py:85: AssertionError ``` --- numpy/_utils/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/_utils/__init__.py b/numpy/_utils/__init__.py index 9794c4e0c4a1..ca3aacd84d5b 100644 --- a/numpy/_utils/__init__.py +++ b/numpy/_utils/__init__.py @@ -66,6 +66,7 @@ def _rename_parameter(old_names, new_names, dep_version=None): def decorator(fun): @functools.wraps(fun) def wrapper(*args, **kwargs): + __tracebackhide__ = True # Hide traceback for py.test for old_name, new_name in zip(old_names, new_names): if old_name in kwargs: if dep_version: From 7d567aa5b3781be5c8cd343f52b833f28fd6a277 Mon Sep 17 00:00:00 2001 From: "Guan Ming(Wesley) Chiu" <105915352+guan404ming@users.noreply.github.com> Date: Sat, 8 Mar 2025 16:37:23 +0800 Subject: [PATCH 132/187] TYP: stub ``random._pickle`` (#28452) * Add random._pickle.pyi * Add new file in meson.build * Align style in numpy/random/meson.build --- numpy/random/_pickle.pyi | 43 ++++++++++++++++++++++++++++++++++++++++ numpy/random/meson.build | 1 + 2 files changed, 44 insertions(+) create mode 100644 numpy/random/_pickle.pyi diff --git a/numpy/random/_pickle.pyi b/numpy/random/_pickle.pyi new file mode 100644 index 000000000000..d4c6e8155ae9 --- /dev/null +++ b/numpy/random/_pickle.pyi @@ -0,0 +1,43 @@ +from collections.abc import Callable +from typing import Final, Literal, TypeVar, TypedDict, overload, type_check_only + +from numpy.random._generator import Generator +from numpy.random._mt19937 import MT19937 +from numpy.random._pcg64 import PCG64, PCG64DXSM +from numpy.random._philox import Philox +from numpy.random._sfc64 import SFC64 +from numpy.random.bit_generator import BitGenerator +from numpy.random.mtrand import RandomState + +_T = TypeVar("_T", bound=BitGenerator) + +@type_check_only +class _BitGenerators(TypedDict): + MT19937: type[MT19937] + PCG64: type[PCG64] + PCG64DXSM: type[PCG64DXSM] + Philox: type[Philox] + SFC64: 
type[SFC64] + +BitGenerators: Final[_BitGenerators] = ... + +@overload +def __bit_generator_ctor(bit_generator: Literal["MT19937"] = "MT19937") -> MT19937: ... +@overload +def __bit_generator_ctor(bit_generator: Literal["PCG64"]) -> PCG64: ... +@overload +def __bit_generator_ctor(bit_generator: Literal["PCG64DXSM"]) -> PCG64DXSM: ... +@overload +def __bit_generator_ctor(bit_generator: Literal["Philox"]) -> Philox: ... +@overload +def __bit_generator_ctor(bit_generator: Literal["SFC64"]) -> SFC64: ... +@overload +def __bit_generator_ctor(bit_generator: type[_T]) -> _T: ... +def __generator_ctor( + bit_generator_name: str | type[BitGenerator] | BitGenerator = "MT19937", + bit_generator_ctor: Callable[[str | type[BitGenerator]], BitGenerator] = ..., +) -> Generator: ... +def __randomstate_ctor( + bit_generator_name: str | type[BitGenerator] | BitGenerator = "MT19937", + bit_generator_ctor: Callable[[str | type[BitGenerator]], BitGenerator] = ..., +) -> RandomState: ... diff --git a/numpy/random/meson.build b/numpy/random/meson.build index f2f2e0ac755c..be342c443b32 100644 --- a/numpy/random/meson.build +++ b/numpy/random/meson.build @@ -104,6 +104,7 @@ py.install_sources( '_mt19937.pyi', '_pcg64.pyi', '_pickle.py', + '_pickle.pyi', '_philox.pyi', '_sfc64.pyi', 'bit_generator.pxd', From 5f957463d06c1ba15be3de5b71bd54d3b202cb30 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 13 Mar 2025 03:08:32 +0100 Subject: [PATCH 133/187] TYP: fix typing typing errors in `_core.shape_base` Backport of numpy/numtype#108 --- numpy/_core/shape_base.pyi | 76 +++++++++++++------ .../tests/data/reveal/array_constructors.pyi | 20 +++-- 2 files changed, 64 insertions(+), 32 deletions(-) diff --git a/numpy/_core/shape_base.pyi b/numpy/_core/shape_base.pyi index 0dadded9423a..decb7be48f9e 100644 --- a/numpy/_core/shape_base.pyi +++ b/numpy/_core/shape_base.pyi @@ -1,14 +1,8 @@ from collections.abc import Sequence -from typing import TypeVar, overload, Any, SupportsIndex +from typing import 
Any, SupportsIndex, TypeVar, overload -from numpy import generic, _CastingKind -from numpy._typing import ( - NDArray, - ArrayLike, - DTypeLike, - _ArrayLike, - _DTypeLike, -) +from numpy import _CastingKind, generic +from numpy._typing import ArrayLike, DTypeLike, NDArray, _ArrayLike, _DTypeLike __all__ = [ "atleast_1d", @@ -22,29 +16,54 @@ __all__ = [ ] _SCT = TypeVar("_SCT", bound=generic) -_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) +_SCT1 = TypeVar("_SCT1", bound=generic) +_SCT2 = TypeVar("_SCT2", bound=generic) +_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) +### + +@overload +def atleast_1d(a0: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... +@overload +def atleast_1d(a0: _ArrayLike[_SCT1], a1: _ArrayLike[_SCT2], /) -> tuple[NDArray[_SCT1], NDArray[_SCT2]]: ... @overload -def atleast_1d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... +def atleast_1d(a0: _ArrayLike[_SCT], a1: _ArrayLike[_SCT], /, *arys: _ArrayLike[_SCT]) -> tuple[NDArray[_SCT], ...]: ... @overload -def atleast_1d(arys: ArrayLike, /) -> NDArray[Any]: ... +def atleast_1d(a0: ArrayLike, /) -> NDArray[Any]: ... @overload -def atleast_1d(*arys: ArrayLike) -> tuple[NDArray[Any], ...]: ... +def atleast_1d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[Any]]: ... +@overload +def atleast_1d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... +# +@overload +def atleast_2d(a0: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... @overload -def atleast_2d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... +def atleast_2d(a0: _ArrayLike[_SCT1], a1: _ArrayLike[_SCT2], /) -> tuple[NDArray[_SCT1], NDArray[_SCT2]]: ... @overload -def atleast_2d(arys: ArrayLike, /) -> NDArray[Any]: ... +def atleast_2d(a0: _ArrayLike[_SCT], a1: _ArrayLike[_SCT], /, *arys: _ArrayLike[_SCT]) -> tuple[NDArray[_SCT], ...]: ... @overload -def atleast_2d(*arys: ArrayLike) -> tuple[NDArray[Any], ...]: ... +def atleast_2d(a0: ArrayLike, /) -> NDArray[Any]: ... 
+@overload +def atleast_2d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[Any]]: ... +@overload +def atleast_2d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... +# +@overload +def atleast_3d(a0: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... +@overload +def atleast_3d(a0: _ArrayLike[_SCT1], a1: _ArrayLike[_SCT2], /) -> tuple[NDArray[_SCT1], NDArray[_SCT2]]: ... @overload -def atleast_3d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... +def atleast_3d(a0: _ArrayLike[_SCT], a1: _ArrayLike[_SCT], /, *arys: _ArrayLike[_SCT]) -> tuple[NDArray[_SCT], ...]: ... @overload -def atleast_3d(arys: ArrayLike, /) -> NDArray[Any]: ... +def atleast_3d(a0: ArrayLike, /) -> NDArray[Any]: ... @overload -def atleast_3d(*arys: ArrayLike) -> tuple[NDArray[Any], ...]: ... +def atleast_3d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[Any]]: ... +@overload +def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... +# @overload def vstack( tup: Sequence[_ArrayLike[_SCT]], @@ -119,12 +138,21 @@ def stack( @overload def stack( arrays: Sequence[ArrayLike], - axis: SupportsIndex = ..., - out: _ArrayType = ..., + axis: SupportsIndex, + out: _ArrayT, *, - dtype: DTypeLike = ..., - casting: _CastingKind = ... -) -> _ArrayType: ... + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind", +) -> _ArrayT: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = 0, + *, + out: _ArrayT, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind", +) -> _ArrayT: ... 
@overload def unstack( diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index c6d56ab0de2d..35861cc0e942 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -203,26 +203,30 @@ assert_type(np.identity(10, dtype=int), npt.NDArray[Any]) assert_type(np.atleast_1d(A), npt.NDArray[np.float64]) assert_type(np.atleast_1d(C), npt.NDArray[Any]) -assert_type(np.atleast_1d(A, A), tuple[npt.NDArray[Any], ...]) -assert_type(np.atleast_1d(A, C), tuple[npt.NDArray[Any], ...]) -assert_type(np.atleast_1d(C, C), tuple[npt.NDArray[Any], ...]) +assert_type(np.atleast_1d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.atleast_1d(A, C), tuple[npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.atleast_1d(C, C), tuple[npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.atleast_1d(A, A, A), tuple[npt.NDArray[np.float64], ...]) +assert_type(np.atleast_1d(C, C, C), tuple[npt.NDArray[Any], ...]) assert_type(np.atleast_2d(A), npt.NDArray[np.float64]) -assert_type(np.atleast_2d(A, A), tuple[npt.NDArray[Any], ...]) +assert_type(np.atleast_2d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.atleast_2d(A, A, A), tuple[npt.NDArray[np.float64], ...]) assert_type(np.atleast_3d(A), npt.NDArray[np.float64]) -assert_type(np.atleast_3d(A, A), tuple[npt.NDArray[Any], ...]) +assert_type(np.atleast_3d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.atleast_3d(A, A, A), tuple[npt.NDArray[np.float64], ...]) assert_type(np.vstack([A, A]), npt.NDArray[np.float64]) -assert_type(np.vstack([A, A], dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.vstack([A, A], dtype=np.float32), npt.NDArray[np.float32]) assert_type(np.vstack([A, C]), npt.NDArray[Any]) assert_type(np.vstack([C, C]), npt.NDArray[Any]) assert_type(np.hstack([A, A]), npt.NDArray[np.float64]) 
-assert_type(np.hstack([A, A], dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.hstack([A, A], dtype=np.float32), npt.NDArray[np.float32]) assert_type(np.stack([A, A]), npt.NDArray[np.float64]) -assert_type(np.stack([A, A], dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.stack([A, A], dtype=np.float32), npt.NDArray[np.float32]) assert_type(np.stack([A, C]), npt.NDArray[Any]) assert_type(np.stack([C, C]), npt.NDArray[Any]) assert_type(np.stack([A, A], axis=0), npt.NDArray[np.float64]) From 6c5a7853b20f1a0039a60f0de4800511deb9bcc6 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 13 Mar 2025 16:35:41 +0100 Subject: [PATCH 134/187] TYP: fix typing errors in `_core.records` backport of numpy/numtype#116 --- numpy/_core/records.pyi | 365 +++++++++++-------------- numpy/typing/tests/data/reveal/rec.pyi | 2 +- 2 files changed, 163 insertions(+), 204 deletions(-) diff --git a/numpy/_core/records.pyi b/numpy/_core/records.pyi index ef60803ffeb4..308f96b7407b 100644 --- a/numpy/_core/records.pyi +++ b/numpy/_core/records.pyi @@ -1,48 +1,24 @@ -from _typeshed import StrOrBytesPath -from collections.abc import Sequence, Iterable +# ruff: noqa: ANN401 +# pyright: reportSelfClsParameterName=false +from collections.abc import Iterable, Sequence from types import EllipsisType -from typing import ( - Any, - TypeAlias, - TypeVar, - overload, - Protocol, - SupportsIndex, - Literal, - type_check_only -) +from typing import Any, Literal, Protocol, SupportsIndex, TypeAlias, TypeVar, overload, type_check_only -from numpy import ( - ndarray, - dtype, - generic, - void, - _ByteOrder, - _SupportsBuffer, - _OrderKACF, -) +from _typeshed import StrOrBytesPath -from numpy._typing import ( - ArrayLike, - DTypeLike, - NDArray, - _Shape, - _ShapeLike, - _ArrayLikeInt_co, - _ArrayLikeVoid_co, - _NestedSequence, -) +from numpy import _ByteOrder, _OrderKACF, _SupportsBuffer, dtype, generic, ndarray, void +from numpy._typing import ArrayLike, DTypeLike, NDArray, 
_ArrayLikeVoid_co, _NestedSequence, _ShapeLike __all__ = [ - "record", - "recarray", + "array", + "find_duplicate", "format_parser", "fromarrays", + "fromfile", "fromrecords", "fromstring", - "fromfile", - "array", - "find_duplicate", + "recarray", + "record", ] _T = TypeVar("_T") @@ -58,6 +34,9 @@ class _SupportsReadInto(Protocol): def tell(self, /) -> int: ... def readinto(self, buffer: memoryview, /) -> int: ... +### + +# exported in `numpy.rec` class record(void): def __getattribute__(self, attr: str) -> Any: ... def __setattr__(self, attr: str, val: ArrayLike) -> None: ... @@ -67,6 +46,7 @@ class record(void): @overload def __getitem__(self, key: list[str]) -> record: ... +# exported in `numpy.rec` class recarray(ndarray[_ShapeT_co, _DType_co]): # NOTE: While not strictly mandatory, we're demanding here that arguments # for the `format_parser`- and `dtype`-based dtype constructors are @@ -75,273 +55,252 @@ class recarray(ndarray[_ShapeT_co, _DType_co]): def __new__( subtype, shape: _ShapeLike, - dtype: None = ..., - buf: None | _SupportsBuffer = ..., - offset: SupportsIndex = ..., - strides: None | _ShapeLike = ..., + dtype: None = None, + buf: _SupportsBuffer | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, *, formats: DTypeLike, - names: None | str | Sequence[str] = ..., - titles: None | str | Sequence[str] = ..., - byteorder: None | _ByteOrder = ..., - aligned: bool = ..., - order: _OrderKACF = ..., + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + byteorder: _ByteOrder | None = None, + aligned: bool = False, + order: _OrderKACF = "C", ) -> recarray[Any, dtype[record]]: ... 
@overload def __new__( subtype, shape: _ShapeLike, dtype: DTypeLike, - buf: None | _SupportsBuffer = ..., - offset: SupportsIndex = ..., - strides: None | _ShapeLike = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - byteorder: None = ..., - aligned: Literal[False] = ..., - order: _OrderKACF = ..., + buf: _SupportsBuffer | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + formats: None = None, + names: None = None, + titles: None = None, + byteorder: None = None, + aligned: Literal[False] = False, + order: _OrderKACF = "C", ) -> recarray[Any, dtype[Any]]: ... - def __array_finalize__(self, obj: object) -> None: ... - def __getattribute__(self, attr: str) -> Any: ... - def __setattr__(self, attr: str, val: ArrayLike) -> None: ... - @overload - def __getitem__(self, indx: ( - SupportsIndex - | _ArrayLikeInt_co - | tuple[SupportsIndex | _ArrayLikeInt_co, ...] - )) -> Any: ... + def __array_finalize__(self, /, obj: object) -> None: ... + def __getattribute__(self, attr: str, /) -> Any: ... + def __setattr__(self, attr: str, val: ArrayLike, /) -> None: ... @overload - def __getitem__(self: recarray[Any, dtype[void]], indx: ( - None - | slice - | EllipsisType - | SupportsIndex - | _ArrayLikeInt_co - | tuple[None | slice | EllipsisType | _ArrayLikeInt_co | SupportsIndex, ...] - )) -> recarray[_Shape, _DType_co]: ... + def field(self, /, attr: int | str, val: None = None) -> Any: ... @overload - def __getitem__(self, indx: ( - None - | slice - | EllipsisType - | SupportsIndex - | _ArrayLikeInt_co - | tuple[None | slice | EllipsisType | _ArrayLikeInt_co | SupportsIndex, ...] - )) -> ndarray[_Shape, _DType_co]: ... - @overload - def __getitem__(self, indx: str) -> NDArray[Any]: ... - @overload - def __getitem__(self, indx: list[str]) -> recarray[_ShapeT_co, dtype[record]]: ... - @overload - def field(self, attr: int | str, val: None = ...) -> Any: ... 
- @overload - def field(self, attr: int | str, val: ArrayLike) -> None: ... + def field(self, /, attr: int | str, val: ArrayLike) -> None: ... +# exported in `numpy.rec` class format_parser: dtype: dtype[void] def __init__( self, + /, formats: DTypeLike, - names: None | str | Sequence[str], - titles: None | str | Sequence[str], - aligned: bool = ..., - byteorder: None | _ByteOrder = ..., + names: str | Sequence[str] | None, + titles: str | Sequence[str] | None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, ) -> None: ... +# exported in `numpy.rec` @overload def fromarrays( arrayList: Iterable[ArrayLike], - dtype: DTypeLike = ..., - shape: None | _ShapeLike = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - aligned: bool = ..., - byteorder: None = ..., + dtype: DTypeLike | None = None, + shape: _ShapeLike | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, ) -> _RecArray[Any]: ... @overload def fromarrays( arrayList: Iterable[ArrayLike], - dtype: None = ..., - shape: None | _ShapeLike = ..., + dtype: None = None, + shape: _ShapeLike | None = None, *, formats: DTypeLike, - names: None | str | Sequence[str] = ..., - titles: None | str | Sequence[str] = ..., - aligned: bool = ..., - byteorder: None | _ByteOrder = ..., + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, ) -> _RecArray[record]: ... @overload def fromrecords( - recList: _ArrayLikeVoid_co | tuple[Any, ...] | _NestedSequence[tuple[Any, ...]], - dtype: DTypeLike = ..., - shape: None | _ShapeLike = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - aligned: bool = ..., - byteorder: None = ..., + recList: _ArrayLikeVoid_co | tuple[object, ...] 
| _NestedSequence[tuple[object, ...]], + dtype: DTypeLike | None = None, + shape: _ShapeLike | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, ) -> _RecArray[record]: ... @overload def fromrecords( - recList: _ArrayLikeVoid_co | tuple[Any, ...] | _NestedSequence[tuple[Any, ...]], - dtype: None = ..., - shape: None | _ShapeLike = ..., + recList: _ArrayLikeVoid_co | tuple[object, ...] | _NestedSequence[tuple[object, ...]], + dtype: None = None, + shape: _ShapeLike | None = None, *, - formats: DTypeLike = ..., - names: None | str | Sequence[str] = ..., - titles: None | str | Sequence[str] = ..., - aligned: bool = ..., - byteorder: None | _ByteOrder = ..., + formats: DTypeLike, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, ) -> _RecArray[record]: ... +# exported in `numpy.rec` @overload def fromstring( datastring: _SupportsBuffer, dtype: DTypeLike, - shape: None | _ShapeLike = ..., - offset: int = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - aligned: bool = ..., - byteorder: None = ..., + shape: _ShapeLike | None = None, + offset: int = 0, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, ) -> _RecArray[record]: ... @overload def fromstring( datastring: _SupportsBuffer, - dtype: None = ..., - shape: None | _ShapeLike = ..., - offset: int = ..., + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, *, formats: DTypeLike, - names: None | str | Sequence[str] = ..., - titles: None | str | Sequence[str] = ..., - aligned: bool = ..., - byteorder: None | _ByteOrder = ..., + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, ) -> _RecArray[record]: ... 
+# exported in `numpy.rec` @overload def fromfile( fd: StrOrBytesPath | _SupportsReadInto, dtype: DTypeLike, - shape: None | _ShapeLike = ..., - offset: int = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - aligned: bool = ..., - byteorder: None = ..., + shape: _ShapeLike | None = None, + offset: int = 0, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, ) -> _RecArray[Any]: ... @overload def fromfile( fd: StrOrBytesPath | _SupportsReadInto, - dtype: None = ..., - shape: None | _ShapeLike = ..., - offset: int = ..., + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, *, formats: DTypeLike, - names: None | str | Sequence[str] = ..., - titles: None | str | Sequence[str] = ..., - aligned: bool = ..., - byteorder: None | _ByteOrder = ..., + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, ) -> _RecArray[record]: ... +# exported in `numpy.rec` @overload def array( obj: _SCT | NDArray[_SCT], - dtype: None = ..., - shape: None | _ShapeLike = ..., - offset: int = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - aligned: bool = ..., - byteorder: None = ..., - copy: bool = ..., + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, + copy: bool = True, ) -> _RecArray[_SCT]: ... 
@overload def array( obj: ArrayLike, dtype: DTypeLike, - shape: None | _ShapeLike = ..., - offset: int = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - aligned: bool = ..., - byteorder: None = ..., - copy: bool = ..., + shape: _ShapeLike | None = None, + offset: int = 0, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, + copy: bool = True, ) -> _RecArray[Any]: ... @overload def array( obj: ArrayLike, - dtype: None = ..., - shape: None | _ShapeLike = ..., - offset: int = ..., + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, *, formats: DTypeLike, - names: None | str | Sequence[str] = ..., - titles: None | str | Sequence[str] = ..., - aligned: bool = ..., - byteorder: None | _ByteOrder = ..., - copy: bool = ..., + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + copy: bool = True, ) -> _RecArray[record]: ... @overload def array( obj: None, dtype: DTypeLike, shape: _ShapeLike, - offset: int = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - aligned: bool = ..., - byteorder: None = ..., - copy: bool = ..., + offset: int = 0, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, + copy: bool = True, ) -> _RecArray[Any]: ... @overload def array( obj: None, - dtype: None = ..., + dtype: None = None, *, shape: _ShapeLike, - offset: int = ..., + offset: int = 0, formats: DTypeLike, - names: None | str | Sequence[str] = ..., - titles: None | str | Sequence[str] = ..., - aligned: bool = ..., - byteorder: None | _ByteOrder = ..., - copy: bool = ..., + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + copy: bool = True, ) -> _RecArray[record]: ... 
@overload def array( obj: _SupportsReadInto, dtype: DTypeLike, - shape: None | _ShapeLike = ..., - offset: int = ..., - formats: None = ..., - names: None = ..., - titles: None = ..., - aligned: bool = ..., - byteorder: None = ..., - copy: bool = ..., + shape: _ShapeLike | None = None, + offset: int = 0, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, + copy: bool = True, ) -> _RecArray[Any]: ... @overload def array( obj: _SupportsReadInto, - dtype: None = ..., - shape: None | _ShapeLike = ..., - offset: int = ..., + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, *, formats: DTypeLike, - names: None | str | Sequence[str] = ..., - titles: None | str | Sequence[str] = ..., - aligned: bool = ..., - byteorder: None | _ByteOrder = ..., - copy: bool = ..., + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + copy: bool = True, ) -> _RecArray[record]: ... +# exported in `numpy.rec` def find_duplicate(list: Iterable[_T]) -> list[_T]: ... 
diff --git a/numpy/typing/tests/data/reveal/rec.pyi b/numpy/typing/tests/data/reveal/rec.pyi index 13db0a969773..1b88f6b46316 100644 --- a/numpy/typing/tests/data/reveal/rec.pyi +++ b/numpy/typing/tests/data/reveal/rec.pyi @@ -7,7 +7,7 @@ import numpy.typing as npt from typing_extensions import assert_type AR_i8: npt.NDArray[np.int64] -REC_AR_V: np.recarray[Any, np.dtype[np.record]] +REC_AR_V: np.recarray[tuple[int, ...], np.dtype[np.record]] AR_LIST: list[npt.NDArray[np.int64]] record: np.record From 8c6007f5f7d2ea43210184f2f0e1c30e8157e367 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Fri, 14 Mar 2025 20:23:15 -0600 Subject: [PATCH 135/187] TYP: Backport typing fixes from #28505, #28506, #28508, and #28511 (#28521) * TYP: stub ``numpy._core.umath`` Backport of numpy/numtype#123 * TYP: fix typing errors in ``numpy.lib._arrayterator_impl`` Ported from numpy/numtype#165 * TYP: fix signatures of ``ndarray.put`` and ``ndarray.view`` partial port of numpy/numtype#200 * TYP: fix typing errors in ``_core.fromnumeric`` Partial port of numpy/numtype#221 * TYP: fix typing errors in ``_core.function_base`` Partial port of numpy/numtype#221 * TYP: add missing implicit re-exports in ``_core.numeric`` Partial port of numpy/numtype#221 * STY: fix `E203` flake8 error --------- Co-authored-by: jorenham --- numpy/__init__.pyi | 44 +- numpy/_core/fromnumeric.pyi | 767 ++++++++++++++++++++----------- numpy/_core/function_base.pyi | 91 ++-- numpy/_core/meson.build | 1 + numpy/_core/numeric.pyi | 7 + numpy/_core/umath.pyi | 197 ++++++++ numpy/lib/_arrayterator_impl.pyi | 63 ++- 7 files changed, 805 insertions(+), 365 deletions(-) create mode 100644 numpy/_core/umath.pyi diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 1a2d6a08bbb1..7f6bd9db55a4 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1097,8 +1097,12 @@ class _HasShape(Protocol[_ShapeT_co]): def shape(self, /) -> _ShapeT_co: ... 
@type_check_only -class _HasShapeAndSupportsItem(_HasShape[_ShapeT_co], _SupportsItem[_T_co], Protocol[_ShapeT_co, _T_co]): - pass +class _HasDType(Protocol[_T_co]): + @property + def dtype(self, /) -> _T_co: ... + +@type_check_only +class _HasShapeAndSupportsItem(_HasShape[_ShapeT_co], _SupportsItem[_T_co], Protocol[_ShapeT_co, _T_co]): ... # matches any `x` on `x.type.item() -> _T_co`, e.g. `dtype[np.int8]` gives `_T_co: int` @type_check_only @@ -2345,12 +2349,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): # `put` is technically available to `generic`, # but is pointless as `generic`s are immutable - def put( - self, - ind: _ArrayLikeInt_co, - v: ArrayLike, - mode: _ModeKind = ..., - ) -> None: ... + def put(self, /, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... @overload def searchsorted( # type: ignore[misc] @@ -2537,20 +2536,21 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): copy: builtins.bool | _CopyMode = ..., ) -> ndarray[_ShapeT_co, dtype[Any]]: ... - @overload - def view(self) -> Self: ... - @overload - def view(self, type: type[_ArrayT]) -> _ArrayT: ... - @overload - def view(self, dtype: _DTypeLike[_SCT]) -> NDArray[_SCT]: ... - @overload - def view(self, dtype: DTypeLike) -> NDArray[Any]: ... - @overload - def view( - self, - dtype: DTypeLike, - type: type[_ArrayT], - ) -> _ArrayT: ... + # + @overload # () + def view(self, /) -> Self: ... + @overload # (dtype: T) + def view(self, /, dtype: _DType | _HasDType[_DType]) -> ndarray[_ShapeT_co, _DType]: ... + @overload # (dtype: dtype[T]) + def view(self, /, dtype: _DTypeLike[_SCT]) -> NDArray[_SCT]: ... + @overload # (type: T) + def view(self, /, *, type: type[_ArrayT]) -> _ArrayT: ... + @overload # (_: T) + def view(self, /, dtype: type[_ArrayT]) -> _ArrayT: ... + @overload # (dtype: ?) + def view(self, /, dtype: DTypeLike) -> ndarray[_ShapeT_co, dtype[Any]]: ... 
+ @overload # (dtype: ?, type: type[T]) + def view(self, /, dtype: DTypeLike, type: type[_ArrayT]) -> _ArrayT: ... @overload def getfield( diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 0465cc5aaa54..f7207db17b0c 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -1,3 +1,4 @@ +# ruff: noqa: ANN401 from collections.abc import Sequence from typing import ( Any, @@ -34,6 +35,7 @@ from numpy import ( _SortSide, _CastingKind, ) +from numpy._globals import _NoValueType from numpy._typing import ( DTypeLike, _DTypeLike, @@ -105,7 +107,7 @@ __all__ = [ _SCT = TypeVar("_SCT", bound=generic) _SCT_uifcO = TypeVar("_SCT_uifcO", bound=number[Any] | object_) -_ArrayType = TypeVar("_ArrayType", bound=np.ndarray[Any, Any]) +_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) _SizeType = TypeVar("_SizeType", bound=int) _ShapeType = TypeVar("_ShapeType", bound=tuple[int, ...]) _ShapeType_co = TypeVar("_ShapeType_co", bound=tuple[int, ...], covariant=True) @@ -120,7 +122,7 @@ class _SupportsShape(Protocol[_ShapeType_co]): _T = TypeVar("_T") _PyArray: TypeAlias = list[_T] | tuple[_T, ...] # `int` also covers `bool` -_PyScalar: TypeAlias = int | float | complex | bytes | str +_PyScalar: TypeAlias = float | complex | bytes | str @overload def take( @@ -134,7 +136,7 @@ def take( def take( a: ArrayLike, indices: _IntLike_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., out: None = ..., mode: _ModeKind = ..., ) -> Any: ... @@ -142,7 +144,7 @@ def take( def take( a: _ArrayLike[_SCT], indices: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., out: None = ..., mode: _ModeKind = ..., ) -> NDArray[_SCT]: ... @@ -150,7 +152,7 @@ def take( def take( a: ArrayLike, indices: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., out: None = ..., mode: _ModeKind = ..., ) -> NDArray[Any]: ... 
@@ -158,10 +160,19 @@ def take( def take( a: ArrayLike, indices: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - out: _ArrayType = ..., + axis: SupportsIndex | None, + out: _ArrayT, mode: _ModeKind = ..., -) -> _ArrayType: ... +) -> _ArrayT: ... +@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = ..., + *, + out: _ArrayT, + mode: _ModeKind = ..., +) -> _ArrayT: ... @overload def reshape( # shape: index @@ -258,21 +269,21 @@ def choose( def choose( a: _ArrayLikeInt_co, choices: ArrayLike, - out: _ArrayType = ..., + out: _ArrayT, mode: _ModeKind = ..., -) -> _ArrayType: ... +) -> _ArrayT: ... @overload def repeat( a: _ArrayLike[_SCT], repeats: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., ) -> NDArray[_SCT]: ... @overload def repeat( a: ArrayLike, repeats: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., ) -> NDArray[Any]: ... def put( @@ -298,70 +309,70 @@ def swapaxes( @overload def transpose( a: _ArrayLike[_SCT], - axes: None | _ShapeLike = ... + axes: _ShapeLike | None = ... ) -> NDArray[_SCT]: ... @overload def transpose( a: ArrayLike, - axes: None | _ShapeLike = ... + axes: _ShapeLike | None = ... ) -> NDArray[Any]: ... @overload -def matrix_transpose(x: _ArrayLike[_SCT]) -> NDArray[_SCT]: ... +def matrix_transpose(x: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... @overload -def matrix_transpose(x: ArrayLike) -> NDArray[Any]: ... +def matrix_transpose(x: ArrayLike, /) -> NDArray[Any]: ... @overload def partition( a: _ArrayLike[_SCT], kth: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., kind: _PartitionKind = ..., - order: None | str | Sequence[str] = ..., + order: str | Sequence[str] | None = ..., ) -> NDArray[_SCT]: ... 
@overload def partition( a: ArrayLike, kth: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., kind: _PartitionKind = ..., - order: None | str | Sequence[str] = ..., + order: str | Sequence[str] | None = ..., ) -> NDArray[Any]: ... def argpartition( a: ArrayLike, kth: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = -1, kind: _PartitionKind = ..., - order: None | str | Sequence[str] = ..., + order: str | Sequence[str] | None = ..., ) -> NDArray[intp]: ... @overload def sort( a: _ArrayLike[_SCT], - axis: None | SupportsIndex = ..., - kind: None | _SortKind = ..., - order: None | str | Sequence[str] = ..., + axis: SupportsIndex | None = ..., + kind: _SortKind | None = ..., + order: str | Sequence[str] | None = ..., *, - stable: None | bool = ..., + stable: bool | None = ..., ) -> NDArray[_SCT]: ... @overload def sort( a: ArrayLike, - axis: None | SupportsIndex = ..., - kind: None | _SortKind = ..., - order: None | str | Sequence[str] = ..., + axis: SupportsIndex | None = ..., + kind: _SortKind | None = ..., + order: str | Sequence[str] | None = ..., *, - stable: None | bool = ..., + stable: bool | None = ..., ) -> NDArray[Any]: ... def argsort( a: ArrayLike, - axis: None | SupportsIndex = ..., - kind: None | _SortKind = ..., - order: None | str | Sequence[str] = ..., + axis: SupportsIndex | None = ..., + kind: _SortKind | None = ..., + order: str | Sequence[str] | None = ..., *, - stable: None | bool = ..., + stable: bool | None = ..., ) -> NDArray[intp]: ... @overload @@ -375,7 +386,7 @@ def argmax( @overload def argmax( a: ArrayLike, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., out: None = ..., *, keepdims: bool = ..., @@ -383,11 +394,19 @@ def argmax( @overload def argmax( a: ArrayLike, - axis: None | SupportsIndex = ..., - out: _ArrayType = ..., + axis: SupportsIndex | None, + out: _ArrayT, *, keepdims: bool = ..., -) -> _ArrayType: ... +) -> _ArrayT: ... 
+@overload +def argmax( + a: ArrayLike, + axis: SupportsIndex | None = ..., + *, + out: _ArrayT, + keepdims: bool = ..., +) -> _ArrayT: ... @overload def argmin( @@ -400,7 +419,7 @@ def argmin( @overload def argmin( a: ArrayLike, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., out: None = ..., *, keepdims: bool = ..., @@ -408,25 +427,33 @@ def argmin( @overload def argmin( a: ArrayLike, - axis: None | SupportsIndex = ..., - out: _ArrayType = ..., + axis: SupportsIndex | None, + out: _ArrayT, *, keepdims: bool = ..., -) -> _ArrayType: ... +) -> _ArrayT: ... +@overload +def argmin( + a: ArrayLike, + axis: SupportsIndex | None = ..., + *, + out: _ArrayT, + keepdims: bool = ..., +) -> _ArrayT: ... @overload def searchsorted( a: ArrayLike, v: _ScalarLike_co, side: _SortSide = ..., - sorter: None | _ArrayLikeInt_co = ..., # 1D int array + sorter: _ArrayLikeInt_co | None = ..., # 1D int array ) -> intp: ... @overload def searchsorted( a: ArrayLike, v: ArrayLike, side: _SortSide = ..., - sorter: None | _ArrayLikeInt_co = ..., # 1D int array + sorter: _ArrayLikeInt_co | None = ..., # 1D int array ) -> NDArray[intp]: ... # unlike `reshape`, `resize` only accepts positive integers, so literal ints can be used @@ -450,17 +477,17 @@ def resize(a: ArrayLike, new_shape: Sequence[SupportsIndex]) -> NDArray[Any]: .. @overload def squeeze( a: _SCT, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None = ..., ) -> _SCT: ... @overload def squeeze( a: _ArrayLike[_SCT], - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None = ..., ) -> NDArray[_SCT]: ... @overload def squeeze( a: ArrayLike, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None = ..., ) -> NDArray[Any]: ... @overload @@ -488,14 +515,24 @@ def trace( out: None = ..., ) -> Any: ... @overload +def trace( + a: ArrayLike, # >= 2D array + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike, + out: _ArrayT, +) -> _ArrayT: ... 
+@overload def trace( a: ArrayLike, # >= 2D array offset: SupportsIndex = ..., axis1: SupportsIndex = ..., axis2: SupportsIndex = ..., dtype: DTypeLike = ..., - out: _ArrayType = ..., -) -> _ArrayType: ... + *, + out: _ArrayT, +) -> _ArrayT: ... _Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_SCT]] @@ -547,120 +584,128 @@ def shape(a: ArrayLike) -> tuple[int, ...]: ... def compress( condition: _ArrayLikeBool_co, # 1D bool array a: _ArrayLike[_SCT], - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., out: None = ..., ) -> NDArray[_SCT]: ... @overload def compress( condition: _ArrayLikeBool_co, # 1D bool array a: ArrayLike, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., out: None = ..., ) -> NDArray[Any]: ... @overload def compress( condition: _ArrayLikeBool_co, # 1D bool array a: ArrayLike, - axis: None | SupportsIndex = ..., - out: _ArrayType = ..., -) -> _ArrayType: ... + axis: SupportsIndex | None, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def compress( + condition: _ArrayLikeBool_co, # 1D bool array + a: ArrayLike, + axis: SupportsIndex | None = ..., + *, + out: _ArrayT, +) -> _ArrayT: ... @overload def clip( a: _SCT, - a_min: None | ArrayLike, - a_max: None | ArrayLike, + a_min: ArrayLike | None, + a_max: ArrayLike | None, out: None = ..., *, - min: None | ArrayLike = ..., - max: None | ArrayLike = ..., + min: ArrayLike | None = ..., + max: ArrayLike | None = ..., dtype: None = ..., - where: None | _ArrayLikeBool_co = ..., + where: _ArrayLikeBool_co | None = ..., order: _OrderKACF = ..., subok: bool = ..., - signature: str | tuple[None | str, ...] = ..., + signature: str | tuple[str | None, ...] = ..., casting: _CastingKind = ..., ) -> _SCT: ... 
@overload def clip( a: _ScalarLike_co, - a_min: None | ArrayLike, - a_max: None | ArrayLike, + a_min: ArrayLike | None, + a_max: ArrayLike | None, out: None = ..., *, - min: None | ArrayLike = ..., - max: None | ArrayLike = ..., + min: ArrayLike | None = ..., + max: ArrayLike | None = ..., dtype: None = ..., - where: None | _ArrayLikeBool_co = ..., + where: _ArrayLikeBool_co | None = ..., order: _OrderKACF = ..., subok: bool = ..., - signature: str | tuple[None | str, ...] = ..., + signature: str | tuple[str | None, ...] = ..., casting: _CastingKind = ..., ) -> Any: ... @overload def clip( a: _ArrayLike[_SCT], - a_min: None | ArrayLike, - a_max: None | ArrayLike, + a_min: ArrayLike | None, + a_max: ArrayLike | None, out: None = ..., *, - min: None | ArrayLike = ..., - max: None | ArrayLike = ..., + min: ArrayLike | None = ..., + max: ArrayLike | None = ..., dtype: None = ..., - where: None | _ArrayLikeBool_co = ..., + where: _ArrayLikeBool_co | None = ..., order: _OrderKACF = ..., subok: bool = ..., - signature: str | tuple[None | str, ...] = ..., + signature: str | tuple[str | None, ...] = ..., casting: _CastingKind = ..., ) -> NDArray[_SCT]: ... @overload def clip( a: ArrayLike, - a_min: None | ArrayLike, - a_max: None | ArrayLike, + a_min: ArrayLike | None, + a_max: ArrayLike | None, out: None = ..., *, - min: None | ArrayLike = ..., - max: None | ArrayLike = ..., + min: ArrayLike | None = ..., + max: ArrayLike | None = ..., dtype: None = ..., - where: None | _ArrayLikeBool_co = ..., + where: _ArrayLikeBool_co | None = ..., order: _OrderKACF = ..., subok: bool = ..., - signature: str | tuple[None | str, ...] = ..., + signature: str | tuple[str | None, ...] = ..., casting: _CastingKind = ..., ) -> NDArray[Any]: ... 
@overload def clip( a: ArrayLike, - a_min: None | ArrayLike, - a_max: None | ArrayLike, - out: _ArrayType = ..., + a_min: ArrayLike | None, + a_max: ArrayLike | None, + out: _ArrayT, *, - min: None | ArrayLike = ..., - max: None | ArrayLike = ..., - dtype: DTypeLike, - where: None | _ArrayLikeBool_co = ..., + min: ArrayLike | None = ..., + max: ArrayLike | None = ..., + dtype: DTypeLike = ..., + where: _ArrayLikeBool_co | None = ..., order: _OrderKACF = ..., subok: bool = ..., - signature: str | tuple[None | str, ...] = ..., + signature: str | tuple[str | None, ...] = ..., casting: _CastingKind = ..., -) -> Any: ... +) -> _ArrayT: ... @overload def clip( a: ArrayLike, - a_min: None | ArrayLike, - a_max: None | ArrayLike, - out: _ArrayType, + a_min: ArrayLike | None, + a_max: ArrayLike | None, + out: ArrayLike = ..., *, - min: None | ArrayLike = ..., - max: None | ArrayLike = ..., - dtype: DTypeLike = ..., - where: None | _ArrayLikeBool_co = ..., + min: ArrayLike | None = ..., + max: ArrayLike | None = ..., + dtype: DTypeLike, + where: _ArrayLikeBool_co | None = ..., order: _OrderKACF = ..., subok: bool = ..., - signature: str | tuple[None | str, ...] = ..., + signature: str | tuple[str | None, ...] = ..., casting: _CastingKind = ..., -) -> _ArrayType: ... +) -> Any: ... 
@overload def sum( @@ -706,7 +751,7 @@ def sum( @overload def sum( a: ArrayLike, - axis: None | _ShapeLike, + axis: _ShapeLike | None, dtype: _DTypeLike[_SCT], out: None = ..., keepdims: bool = ..., @@ -716,7 +761,7 @@ def sum( @overload def sum( a: ArrayLike, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None = ..., *, dtype: _DTypeLike[_SCT], out: None = ..., @@ -727,7 +772,7 @@ def sum( @overload def sum( a: ArrayLike, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., out: None = ..., keepdims: bool = ..., @@ -737,130 +782,157 @@ def sum( @overload def sum( a: ArrayLike, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None, + dtype: DTypeLike, + out: _ArrayT, + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayT: ... +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., - out: _ArrayType = ..., + *, + out: _ArrayT, keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., -) -> _ArrayType: ... +) -> _ArrayT: ... @overload def all( a: ArrayLike, axis: None = None, out: None = None, - keepdims: Literal[False, 0] = False, + keepdims: Literal[False, 0] | _NoValueType = ..., *, - where: _ArrayLikeBool_co = True, + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> np.bool: ... @overload def all( a: ArrayLike, - axis: None | int | tuple[int, ...] = None, + axis: int | tuple[int, ...] | None = None, out: None = None, - keepdims: SupportsIndex = False, + keepdims: _BoolLike_co | _NoValueType = ..., *, - where: _ArrayLikeBool_co = True, + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> np.bool | NDArray[np.bool]: ... @overload def all( a: ArrayLike, - axis: None | int | tuple[int, ...], - out: _ArrayType, - keepdims: SupportsIndex = False, + axis: int | tuple[int, ...] | None, + out: _ArrayT, + keepdims: _BoolLike_co | _NoValueType = ..., *, - where: _ArrayLikeBool_co = True, -) -> _ArrayType: ... 
+ where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... @overload def all( a: ArrayLike, - axis: None | int | tuple[int, ...] = None, + axis: int | tuple[int, ...] | None = None, *, - out: _ArrayType, - keepdims: SupportsIndex = False, - where: _ArrayLikeBool_co = True, -) -> _ArrayType: ... + out: _ArrayT, + keepdims: _BoolLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... @overload def any( a: ArrayLike, axis: None = None, out: None = None, - keepdims: Literal[False, 0] = False, + keepdims: Literal[False, 0] | _NoValueType = ..., *, - where: _ArrayLikeBool_co = True, + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> np.bool: ... @overload def any( a: ArrayLike, - axis: None | int | tuple[int, ...] = None, + axis: int | tuple[int, ...] | None = None, out: None = None, - keepdims: SupportsIndex = False, + keepdims: _BoolLike_co | _NoValueType = ..., *, - where: _ArrayLikeBool_co = True, + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> np.bool | NDArray[np.bool]: ... @overload def any( a: ArrayLike, - axis: None | int | tuple[int, ...], - out: _ArrayType, - keepdims: SupportsIndex = False, + axis: int | tuple[int, ...] | None, + out: _ArrayT, + keepdims: _BoolLike_co | _NoValueType = ..., *, - where: _ArrayLikeBool_co = True, -) -> _ArrayType: ... + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... @overload def any( a: ArrayLike, - axis: None | int | tuple[int, ...] = None, + axis: int | tuple[int, ...] | None = None, *, - out: _ArrayType, - keepdims: SupportsIndex = False, - where: _ArrayLikeBool_co = True, -) -> _ArrayType: ... + out: _ArrayT, + keepdims: _BoolLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... @overload def cumsum( a: _ArrayLike[_SCT], - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., ) -> NDArray[_SCT]: ... 
@overload def cumsum( a: ArrayLike, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., ) -> NDArray[Any]: ... @overload def cumsum( a: ArrayLike, - axis: None | SupportsIndex = ..., - dtype: _DTypeLike[_SCT] = ..., + axis: SupportsIndex | None, + dtype: _DTypeLike[_SCT], + out: None = ..., +) -> NDArray[_SCT]: ... +@overload +def cumsum( + a: ArrayLike, + axis: SupportsIndex | None = ..., + *, + dtype: _DTypeLike[_SCT], out: None = ..., ) -> NDArray[_SCT]: ... @overload def cumsum( a: ArrayLike, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: DTypeLike = ..., out: None = ..., ) -> NDArray[Any]: ... @overload def cumsum( a: ArrayLike, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None, + dtype: DTypeLike, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def cumsum( + a: ArrayLike, + axis: SupportsIndex | None = ..., dtype: DTypeLike = ..., - out: _ArrayType = ..., -) -> _ArrayType: ... + *, + out: _ArrayT, +) -> _ArrayT: ... @overload def cumulative_sum( x: _ArrayLike[_SCT], /, *, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., include_initial: bool = ..., @@ -870,7 +942,7 @@ def cumulative_sum( x: ArrayLike, /, *, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., include_initial: bool = ..., @@ -880,8 +952,8 @@ def cumulative_sum( x: ArrayLike, /, *, - axis: None | SupportsIndex = ..., - dtype: _DTypeLike[_SCT] = ..., + axis: SupportsIndex | None = ..., + dtype: _DTypeLike[_SCT], out: None = ..., include_initial: bool = ..., ) -> NDArray[_SCT]: ... 
@@ -890,7 +962,7 @@ def cumulative_sum( x: ArrayLike, /, *, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: DTypeLike = ..., out: None = ..., include_initial: bool = ..., @@ -900,11 +972,11 @@ def cumulative_sum( x: ArrayLike, /, *, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: DTypeLike = ..., - out: _ArrayType = ..., + out: _ArrayT, include_initial: bool = ..., -) -> _ArrayType: ... +) -> _ArrayT: ... @overload def ptp( @@ -916,17 +988,25 @@ def ptp( @overload def ptp( a: ArrayLike, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None = ..., out: None = ..., keepdims: bool = ..., ) -> Any: ... @overload def ptp( a: ArrayLike, - axis: None | _ShapeLike = ..., - out: _ArrayType = ..., + axis: _ShapeLike | None, + out: _ArrayT, keepdims: bool = ..., -) -> _ArrayType: ... +) -> _ArrayT: ... +@overload +def ptp( + a: ArrayLike, + axis: _ShapeLike | None = ..., + *, + out: _ArrayT, + keepdims: bool = ..., +) -> _ArrayT: ... @overload def amax( @@ -940,7 +1020,7 @@ def amax( @overload def amax( a: ArrayLike, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None = ..., out: None = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., @@ -949,12 +1029,22 @@ def amax( @overload def amax( a: ArrayLike, - axis: None | _ShapeLike = ..., - out: _ArrayType = ..., + axis: _ShapeLike | None, + out: _ArrayT, keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., -) -> _ArrayType: ... +) -> _ArrayT: ... +@overload +def amax( + a: ArrayLike, + axis: _ShapeLike | None = ..., + *, + out: _ArrayT, + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayT: ... 
@overload def amin( @@ -968,7 +1058,7 @@ def amin( @overload def amin( a: ArrayLike, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None = ..., out: None = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., @@ -977,12 +1067,22 @@ def amin( @overload def amin( a: ArrayLike, - axis: None | _ShapeLike = ..., - out: _ArrayType = ..., + axis: _ShapeLike | None, + out: _ArrayT, keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., -) -> _ArrayType: ... +) -> _ArrayT: ... +@overload +def amin( + a: ArrayLike, + axis: _ShapeLike | None = ..., + *, + out: _ArrayT, + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayT: ... # TODO: `np.prod()``: For object arrays `initial` does not necessarily # have to be a numerical scalar. @@ -1044,7 +1144,7 @@ def prod( @overload def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None = ..., dtype: None = ..., out: None = ..., keepdims: bool = ..., @@ -1052,10 +1152,21 @@ def prod( where: _ArrayLikeBool_co = ..., ) -> Any: ... @overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None, + dtype: _DTypeLike[_SCT], + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _SCT: ... 
+@overload def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None = ..., - dtype: _DTypeLike[_SCT] = ..., + *, + dtype: _DTypeLike[_SCT], out: None = ..., keepdims: Literal[False] = ..., initial: _NumberLike_co = ..., @@ -1064,8 +1175,8 @@ def prod( @overload def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - dtype: None | DTypeLike = ..., + axis: _ShapeLike | None = ..., + dtype: DTypeLike | None = ..., out: None = ..., keepdims: bool = ..., initial: _NumberLike_co = ..., @@ -1074,84 +1185,111 @@ def prod( @overload def prod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - dtype: None | DTypeLike = ..., - out: _ArrayType = ..., + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayT: ... +@overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: DTypeLike | None = ..., + *, + out: _ArrayT, keepdims: bool = ..., initial: _NumberLike_co = ..., where: _ArrayLikeBool_co = ..., -) -> _ArrayType: ... +) -> _ArrayT: ... @overload def cumprod( a: _ArrayLikeBool_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., ) -> NDArray[int_]: ... @overload def cumprod( a: _ArrayLikeUInt_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., ) -> NDArray[uint64]: ... @overload def cumprod( a: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., ) -> NDArray[int64]: ... @overload def cumprod( a: _ArrayLikeFloat_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., ) -> NDArray[floating[Any]]: ... 
@overload def cumprod( a: _ArrayLikeComplex_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., ) -> NDArray[complexfloating[Any, Any]]: ... @overload def cumprod( a: _ArrayLikeObject_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., ) -> NDArray[object_]: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | SupportsIndex = ..., - dtype: _DTypeLike[_SCT] = ..., + axis: SupportsIndex | None, + dtype: _DTypeLike[_SCT], + out: None = ..., +) -> NDArray[_SCT]: ... +@overload +def cumprod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: SupportsIndex | None = ..., + *, + dtype: _DTypeLike[_SCT], out: None = ..., ) -> NDArray[_SCT]: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: DTypeLike = ..., out: None = ..., ) -> NDArray[Any]: ... @overload def cumprod( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None, + dtype: DTypeLike, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def cumprod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: SupportsIndex | None = ..., dtype: DTypeLike = ..., - out: _ArrayType = ..., -) -> _ArrayType: ... + *, + out: _ArrayT, +) -> _ArrayT: ... 
@overload def cumulative_prod( x: _ArrayLikeBool_co, /, *, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., include_initial: bool = ..., @@ -1161,7 +1299,7 @@ def cumulative_prod( x: _ArrayLikeUInt_co, /, *, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., include_initial: bool = ..., @@ -1171,7 +1309,7 @@ def cumulative_prod( x: _ArrayLikeInt_co, /, *, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., include_initial: bool = ..., @@ -1181,7 +1319,7 @@ def cumulative_prod( x: _ArrayLikeFloat_co, /, *, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., include_initial: bool = ..., @@ -1191,7 +1329,7 @@ def cumulative_prod( x: _ArrayLikeComplex_co, /, *, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., include_initial: bool = ..., @@ -1201,7 +1339,7 @@ def cumulative_prod( x: _ArrayLikeObject_co, /, *, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: None = ..., out: None = ..., include_initial: bool = ..., @@ -1211,8 +1349,8 @@ def cumulative_prod( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: None | SupportsIndex = ..., - dtype: _DTypeLike[_SCT] = ..., + axis: SupportsIndex | None = ..., + dtype: _DTypeLike[_SCT], out: None = ..., include_initial: bool = ..., ) -> NDArray[_SCT]: ... 
@@ -1221,7 +1359,7 @@ def cumulative_prod( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: DTypeLike = ..., out: None = ..., include_initial: bool = ..., @@ -1231,15 +1369,15 @@ def cumulative_prod( x: _ArrayLikeComplex_co | _ArrayLikeObject_co, /, *, - axis: None | SupportsIndex = ..., + axis: SupportsIndex | None = ..., dtype: DTypeLike = ..., - out: _ArrayType = ..., + out: _ArrayT, include_initial: bool = ..., -) -> _ArrayType: ... +) -> _ArrayT: ... def ndim(a: ArrayLike) -> int: ... -def size(a: ArrayLike, axis: None | int = ...) -> int: ... +def size(a: ArrayLike, axis: int | None = ...) -> int: ... @overload def around( @@ -1278,11 +1416,18 @@ def around( out: None = ..., ) -> NDArray[Any]: ... @overload +def around( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + decimals: SupportsIndex, + out: _ArrayT, +) -> _ArrayT: ... +@overload def around( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, decimals: SupportsIndex = ..., - out: _ArrayType = ..., -) -> _ArrayType: ... + *, + out: _ArrayT, +) -> _ArrayT: ... @overload def mean( @@ -1290,9 +1435,9 @@ def mean( axis: None = ..., dtype: None = ..., out: None = ..., - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., *, - where: _ArrayLikeBool_co = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> floating[Any]: ... @overload def mean( @@ -1300,9 +1445,9 @@ def mean( axis: None = ..., dtype: None = ..., out: None = ..., - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., *, - where: _ArrayLikeBool_co = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> complexfloating[Any, Any]: ... 
@overload def mean( @@ -1310,40 +1455,40 @@ def mean( axis: None = ..., dtype: None = ..., out: None = ..., - keepdims: Literal[False] = ..., + keepdims: Literal[False] | _NoValueType = ..., *, - where: _ArrayLikeBool_co = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> timedelta64: ... @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None = ..., dtype: None = ..., out: None = ..., - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., *, - where: _ArrayLikeBool_co = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., - dtype: _DTypeLike[_SCT] = ..., + axis: None, + dtype: _DTypeLike[_SCT], out: None = ..., - keepdims: Literal[False] = ..., + keepdims: bool | _NoValueType = ..., *, - where: _ArrayLikeBool_co = ..., -) -> _SCT: ... + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _SCT | NDArray[_SCT]: ... @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None, + axis: None = ..., + *, dtype: _DTypeLike[_SCT], out: None = ..., - keepdims: bool = ..., - *, - where: _ArrayLikeBool_co = ..., -) -> _SCT | NDArray[_SCT]: ... + keepdims: Literal[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _SCT: ... @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, @@ -1351,29 +1496,39 @@ def mean( *, dtype: _DTypeLike[_SCT], out: None = ..., - keepdims: bool = ..., - where: _ArrayLikeBool_co = ..., + keepdims: bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> _SCT | NDArray[_SCT]: ... 
@overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., out: None = ..., - keepdims: bool = ..., + keepdims: bool | _NoValueType = ..., *, - where: _ArrayLikeBool_co = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., ) -> Any: ... @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None, + dtype: DTypeLike, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., - out: _ArrayType = ..., - keepdims: bool = ..., *, - where: _ArrayLikeBool_co = ..., -) -> _ArrayType: ... + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... @overload def std( @@ -1381,65 +1536,91 @@ def std( axis: None = ..., dtype: None = ..., out: None = ..., - ddof: int | float = ..., + ddof: float = ..., keepdims: Literal[False] = ..., *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co = ..., - correction: int | float = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> floating[Any]: ... @overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None = ..., dtype: None = ..., out: None = ..., - ddof: int | float = ..., + ddof: float = ..., keepdims: bool = ..., *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., - correction: int | float = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> Any: ... 
@overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., - dtype: _DTypeLike[_SCT] = ..., + axis: None, + dtype: _DTypeLike[_SCT], out: None = ..., - ddof: int | float = ..., + ddof: float = ..., keepdims: Literal[False] = ..., *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., - correction: int | float = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> _SCT: ... @overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., + axis: None = ..., + *, + dtype: _DTypeLike[_SCT], + out: None = ..., + ddof: float = ..., + keepdims: Literal[False] = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _SCT: ... +@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., out: None = ..., - ddof: int | float = ..., + ddof: float = ..., keepdims: bool = ..., *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., - correction: int | float = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> Any: ... @overload def std( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: _ArrayType = ..., - ddof: int | float = ..., + axis: _ShapeLike | None, + dtype: DTypeLike, + out: _ArrayT, + ddof: float = ..., keepdims: bool = ..., *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., - correction: int | float = ..., -) -> _ArrayType: ... 
+ where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayT, + ddof: float = ..., + keepdims: bool = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ArrayT: ... @overload def var( @@ -1447,65 +1628,91 @@ def var( axis: None = ..., dtype: None = ..., out: None = ..., - ddof: int | float = ..., + ddof: float = ..., keepdims: Literal[False] = ..., *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co = ..., - correction: int | float = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> floating[Any]: ... @overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None = ..., dtype: None = ..., out: None = ..., - ddof: int | float = ..., + ddof: float = ..., keepdims: bool = ..., *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., - correction: int | float = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> Any: ... 
@overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., - dtype: _DTypeLike[_SCT] = ..., + axis: None, + dtype: _DTypeLike[_SCT], out: None = ..., - ddof: int | float = ..., + ddof: float = ..., keepdims: Literal[False] = ..., *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., - correction: int | float = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _SCT: ... +@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None = ..., + *, + dtype: _DTypeLike[_SCT], + out: None = ..., + ddof: float = ..., + keepdims: Literal[False] = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> _SCT: ... @overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., + axis: _ShapeLike | None = ..., dtype: DTypeLike = ..., out: None = ..., - ddof: int | float = ..., + ddof: float = ..., keepdims: bool = ..., *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., - correction: int | float = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., ) -> Any: ... @overload def var( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - dtype: DTypeLike = ..., - out: _ArrayType = ..., - ddof: int | float = ..., + axis: _ShapeLike | None, + dtype: DTypeLike, + out: _ArrayT, + ddof: float = ..., keepdims: bool = ..., *, - where: _ArrayLikeBool_co = ..., - mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., - correction: int | float = ..., -) -> _ArrayType: ... 
+ where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayT, + ddof: float = ..., + keepdims: bool = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ArrayT: ... max = amax min = amin diff --git a/numpy/_core/function_base.pyi b/numpy/_core/function_base.pyi index 1d7ea3a2792e..12fdf677d0f5 100644 --- a/numpy/_core/function_base.pyi +++ b/numpy/_core/function_base.pyi @@ -29,8 +29,8 @@ def linspace( dtype: None = ..., axis: SupportsIndex = ..., *, - device: None | L["cpu"] = ..., -) -> NDArray[floating[Any]]: ... + device: L["cpu"] | None = ..., +) -> NDArray[floating]: ... @overload def linspace( start: _ArrayLikeComplex_co, @@ -41,8 +41,20 @@ def linspace( dtype: None = ..., axis: SupportsIndex = ..., *, - device: None | L["cpu"] = ..., -) -> NDArray[complexfloating[Any, Any]]: ... + device: L["cpu"] | None = ..., +) -> NDArray[complexfloating]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex, + endpoint: bool, + retstep: L[False], + dtype: _DTypeLike[_SCT], + axis: SupportsIndex = ..., + *, + device: L["cpu"] | None = ..., +) -> NDArray[_SCT]: ... @overload def linspace( start: _ArrayLikeComplex_co, @@ -50,10 +62,10 @@ def linspace( num: SupportsIndex = ..., endpoint: bool = ..., retstep: L[False] = ..., - dtype: _DTypeLike[_SCT] = ..., - axis: SupportsIndex = ..., *, - device: None | L["cpu"] = ..., + dtype: _DTypeLike[_SCT], + axis: SupportsIndex = ..., + device: L["cpu"] | None = ..., ) -> NDArray[_SCT]: ... 
@overload def linspace( @@ -65,7 +77,7 @@ def linspace( dtype: DTypeLike = ..., axis: SupportsIndex = ..., *, - device: None | L["cpu"] = ..., + device: L["cpu"] | None = ..., ) -> NDArray[Any]: ... @overload def linspace( @@ -73,35 +85,35 @@ def linspace( stop: _ArrayLikeFloat_co, num: SupportsIndex = ..., endpoint: bool = ..., - retstep: L[True] = ..., + *, + retstep: L[True], dtype: None = ..., axis: SupportsIndex = ..., - *, - device: None | L["cpu"] = ..., -) -> tuple[NDArray[floating[Any]], floating[Any]]: ... + device: L["cpu"] | None = ..., +) -> tuple[NDArray[floating], floating]: ... @overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = ..., endpoint: bool = ..., - retstep: L[True] = ..., + *, + retstep: L[True], dtype: None = ..., axis: SupportsIndex = ..., - *, - device: None | L["cpu"] = ..., -) -> tuple[NDArray[complexfloating[Any, Any]], complexfloating[Any, Any]]: ... + device: L["cpu"] | None = ..., +) -> tuple[NDArray[complexfloating], complexfloating]: ... @overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = ..., endpoint: bool = ..., - retstep: L[True] = ..., - dtype: _DTypeLike[_SCT] = ..., - axis: SupportsIndex = ..., *, - device: None | L["cpu"] = ..., + retstep: L[True], + dtype: _DTypeLike[_SCT], + axis: SupportsIndex = ..., + device: L["cpu"] | None = ..., ) -> tuple[NDArray[_SCT], _SCT]: ... @overload def linspace( @@ -109,11 +121,11 @@ def linspace( stop: _ArrayLikeComplex_co, num: SupportsIndex = ..., endpoint: bool = ..., - retstep: L[True] = ..., + *, + retstep: L[True], dtype: DTypeLike = ..., axis: SupportsIndex = ..., - *, - device: None | L["cpu"] = ..., + device: L["cpu"] | None = ..., ) -> tuple[NDArray[Any], Any]: ... @overload @@ -125,7 +137,7 @@ def logspace( base: _ArrayLikeFloat_co = ..., dtype: None = ..., axis: SupportsIndex = ..., -) -> NDArray[floating[Any]]: ... +) -> NDArray[floating]: ... 
@overload def logspace( start: _ArrayLikeComplex_co, @@ -135,7 +147,17 @@ def logspace( base: _ArrayLikeComplex_co = ..., dtype: None = ..., axis: SupportsIndex = ..., -) -> NDArray[complexfloating[Any, Any]]: ... +) -> NDArray[complexfloating]: ... +@overload +def logspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex, + endpoint: bool, + base: _ArrayLikeComplex_co, + dtype: _DTypeLike[_SCT], + axis: SupportsIndex = ..., +) -> NDArray[_SCT]: ... @overload def logspace( start: _ArrayLikeComplex_co, @@ -143,7 +165,8 @@ def logspace( num: SupportsIndex = ..., endpoint: bool = ..., base: _ArrayLikeComplex_co = ..., - dtype: _DTypeLike[_SCT] = ..., + *, + dtype: _DTypeLike[_SCT], axis: SupportsIndex = ..., ) -> NDArray[_SCT]: ... @overload @@ -165,7 +188,7 @@ def geomspace( endpoint: bool = ..., dtype: None = ..., axis: SupportsIndex = ..., -) -> NDArray[floating[Any]]: ... +) -> NDArray[floating]: ... @overload def geomspace( start: _ArrayLikeComplex_co, @@ -174,14 +197,24 @@ def geomspace( endpoint: bool = ..., dtype: None = ..., axis: SupportsIndex = ..., -) -> NDArray[complexfloating[Any, Any]]: ... +) -> NDArray[complexfloating]: ... +@overload +def geomspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex, + endpoint: bool, + dtype: _DTypeLike[_SCT], + axis: SupportsIndex = ..., +) -> NDArray[_SCT]: ... @overload def geomspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, num: SupportsIndex = ..., endpoint: bool = ..., - dtype: _DTypeLike[_SCT] = ..., + *, + dtype: _DTypeLike[_SCT], axis: SupportsIndex = ..., ) -> NDArray[_SCT]: ... 
@overload diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index d32d71adc5dd..ec40c290f59a 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1335,6 +1335,7 @@ python_sources = [ 'strings.py', 'strings.pyi', 'umath.py', + 'umath.pyi', ] py.install_sources( diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index d97ee6e4f649..7966d9ac118b 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -47,6 +47,13 @@ from numpy import ( _OrderKACF, _OrderCF, ) +from .fromnumeric import ( + all as all, + any as any, + argpartition as argpartition, + matrix_transpose as matrix_transpose, + mean as mean, +) from .multiarray import ( # re-exports arange, diff --git a/numpy/_core/umath.pyi b/numpy/_core/umath.pyi new file mode 100644 index 000000000000..d9f0d384cf6d --- /dev/null +++ b/numpy/_core/umath.pyi @@ -0,0 +1,197 @@ +from numpy import ( + absolute, + add, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + bitwise_and, + bitwise_count, + bitwise_or, + bitwise_xor, + cbrt, + ceil, + conj, + conjugate, + copysign, + cos, + cosh, + deg2rad, + degrees, + divide, + divmod, + e, + equal, + euler_gamma, + exp, + exp2, + expm1, + fabs, + float_power, + floor, + floor_divide, + fmax, + fmin, + fmod, + frexp, + frompyfunc, + gcd, + greater, + greater_equal, + heaviside, + hypot, + invert, + isfinite, + isinf, + isnan, + isnat, + lcm, + ldexp, + left_shift, + less, + less_equal, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + matvec, + maximum, + minimum, + mod, + modf, + multiply, + negative, + nextafter, + not_equal, + pi, + positive, + power, + rad2deg, + radians, + reciprocal, + remainder, + right_shift, + rint, + sign, + signbit, + sin, + sinh, + spacing, + sqrt, + square, + subtract, + tan, + tanh, + true_divide, + trunc, + vecdot, + vecmat, +) + +__all__ = [ + "absolute", + "add", + "arccos", + "arccosh", + "arcsin", 
+ "arcsinh", + "arctan", + "arctan2", + "arctanh", + "bitwise_and", + "bitwise_count", + "bitwise_or", + "bitwise_xor", + "cbrt", + "ceil", + "conj", + "conjugate", + "copysign", + "cos", + "cosh", + "deg2rad", + "degrees", + "divide", + "divmod", + "e", + "equal", + "euler_gamma", + "exp", + "exp2", + "expm1", + "fabs", + "float_power", + "floor", + "floor_divide", + "fmax", + "fmin", + "fmod", + "frexp", + "frompyfunc", + "gcd", + "greater", + "greater_equal", + "heaviside", + "hypot", + "invert", + "isfinite", + "isinf", + "isnan", + "isnat", + "lcm", + "ldexp", + "left_shift", + "less", + "less_equal", + "log", + "log1p", + "log2", + "log10", + "logaddexp", + "logaddexp2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "matvec", + "maximum", + "minimum", + "mod", + "modf", + "multiply", + "negative", + "nextafter", + "not_equal", + "pi", + "positive", + "power", + "rad2deg", + "radians", + "reciprocal", + "remainder", + "right_shift", + "rint", + "sign", + "signbit", + "sin", + "sinh", + "spacing", + "sqrt", + "square", + "subtract", + "tan", + "tanh", + "true_divide", + "trunc", + "vecdot", + "vecmat", +] diff --git a/numpy/lib/_arrayterator_impl.pyi b/numpy/lib/_arrayterator_impl.pyi index 58875b3c9301..c24fe56ac8a9 100644 --- a/numpy/lib/_arrayterator_impl.pyi +++ b/numpy/lib/_arrayterator_impl.pyi @@ -1,51 +1,46 @@ +# pyright: reportIncompatibleMethodOverride=false + from collections.abc import Generator from types import EllipsisType -from typing import ( - Any, - TypeAlias, - TypeVar, - overload, -) +from typing import Any, Final, TypeAlias, overload + +from typing_extensions import TypeVar -from numpy import ndarray, dtype, generic -from numpy._typing import DTypeLike, NDArray, _Shape as _AnyShape +import numpy as np __all__ = ["Arrayterator"] -# TODO: Rename to ``_ShapeType`` -_Shape = TypeVar("_Shape", bound=_AnyShape) -_DType = TypeVar("_DType", bound=dtype[Any]) -_ScalarType = TypeVar("_ScalarType", bound=generic) +_ShapeT_co = 
TypeVar("_ShapeT_co", bound=tuple[int, ...], covariant=True) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype[Any]) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype[Any], covariant=True) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) -_Index: TypeAlias = ( - EllipsisType - | int - | slice - | tuple[EllipsisType | int | slice, ...] -) +_AnyIndex: TypeAlias = EllipsisType | int | slice | tuple[EllipsisType | int | slice, ...] # NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`, # but its ``__getattr__` method does wrap around the former and thus has # access to all its methods -class Arrayterator(ndarray[_Shape, _DType]): - var: ndarray[_Shape, _DType] # type: ignore[assignment] - buf_size: None | int - start: list[int] - stop: list[int] - step: list[int] +class Arrayterator(np.ndarray[_ShapeT_co, _DTypeT_co]): + var: np.ndarray[_ShapeT_co, _DTypeT_co] # type: ignore[assignment] + buf_size: Final[int | None] + start: Final[list[int]] + stop: Final[list[int]] + step: Final[list[int]] @property # type: ignore[misc] - def shape(self) -> tuple[int, ...]: ... + def shape(self) -> _ShapeT_co: ... @property - def flat(self: NDArray[_ScalarType]) -> Generator[_ScalarType, None, None]: ... - def __init__( - self, var: ndarray[_Shape, _DType], buf_size: None | int = ... - ) -> None: ... - @overload - def __array__(self, dtype: None = ..., copy: None | bool = ...) -> ndarray[_AnyShape, _DType]: ... + def flat(self: Arrayterator[Any, np.dtype[_ScalarT]]) -> Generator[_ScalarT]: ... # type: ignore[override] + + # + def __init__(self, /, var: np.ndarray[_ShapeT_co, _DTypeT_co], buf_size: int | None = None) -> None: ... + def __getitem__(self, index: _AnyIndex, /) -> Arrayterator[tuple[int, ...], _DTypeT_co]: ... # type: ignore[override] + def __iter__(self) -> Generator[np.ndarray[tuple[int, ...], _DTypeT_co]]: ... 
+ + # + @overload # type: ignore[override] + def __array__(self, /, dtype: None = None, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __array__(self, dtype: DTypeLike, copy: None | bool = ...) -> NDArray[Any]: ... - def __getitem__(self, index: _Index) -> Arrayterator[_AnyShape, _DType]: ... - def __iter__(self) -> Generator[ndarray[_AnyShape, _DType], None, None]: ... + def __array__(self, /, dtype: _DTypeT, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT]: ... From c694399cda60883d1d088b74e72475b72508c349 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 15 Mar 2025 12:52:07 -0600 Subject: [PATCH 136/187] TYP: Backport typing fixes from main (2) (#28533) * TYP: don't use literals in shape-types Partial backport of numpy/numtype#122 and numpy/numtype#152 * TYP: annotate the missing ``ufunc.resolve_dtypes`` method Ported from numpy/numtype#218 * TYP: stub ``numpy._core.overrides`` Ported from numpy/numtype#226 * TYP: stub ``numpy._utils`` Ported from numpy/numtype#225 * TYP: stub ``numpy._core._dtype[_ctypes]`` Ported from numpy/numtype#227 * TYP: stub the remaining ``numpy._core.*`` modules Ported from numpy/numtype#241 * TYP: stub the missing submodules of ``numpy.linalg`` Ported from numpy/numtype#248 * TYP: stub ``numpy._pyinstaller`` Ported from numpy/numtype#264 * TYP: stub ``numpy.fft.helper`` (deprecated) Ported from numpy/numtype#261 * TYP: annotate the missing deprecated ``row_stack`` function Ported from numpy/numtype#223 --------- Co-authored-by: jorenham --- numpy/__init__.pyi | 15 +- numpy/_core/_add_newdocs.pyi | 3 + numpy/_core/_add_newdocs_scalars.pyi | 16 ++ numpy/_core/_dtype.pyi | 58 +++++++ numpy/_core/_dtype_ctypes.pyi | 83 +++++++++++ numpy/_core/_exceptions.pyi | 73 +++++++++ numpy/_core/_machar.pyi | 73 +++++++++ numpy/_core/_methods.pyi | 24 +++ numpy/_core/_simd.pyi | 25 ++++ numpy/_core/_string_helpers.pyi | 12 ++ numpy/_core/fromnumeric.pyi | 19 +-- numpy/_core/meson.build | 11 
++ numpy/_core/multiarray.pyi | 88 ++++++----- numpy/_core/numeric.pyi | 34 ++--- numpy/_core/overrides.pyi | 50 +++++++ numpy/_core/printoptions.pyi | 28 ++++ numpy/_pyinstaller/__init__.pyi | 0 numpy/_pyinstaller/hook-numpy.pyi | 13 ++ numpy/_utils/__init__.pyi | 31 ++++ numpy/_utils/_convertions.pyi | 4 + numpy/_utils/_inspect.pyi | 71 +++++++++ numpy/_utils/_pep440.pyi | 121 +++++++++++++++ numpy/core/_dtype.pyi | 0 numpy/core/_dtype_ctypes.pyi | 0 numpy/core/overrides.pyi | 7 + numpy/fft/_helper.pyi | 57 +++---- numpy/fft/helper.pyi | 22 +++ numpy/fft/meson.build | 1 + numpy/lib/_shape_base_impl.pyi | 27 ++-- numpy/linalg/_linalg.pyi | 7 +- numpy/linalg/_umath_linalg.pyi | 61 ++++++++ numpy/linalg/lapack_lite.pyi | 141 ++++++++++++++++++ numpy/linalg/linalg.pyi | 69 +++++++++ numpy/linalg/meson.build | 3 + numpy/matlib.pyi | 2 +- .../typing/tests/data/reveal/fromnumeric.pyi | 10 +- 36 files changed, 1131 insertions(+), 128 deletions(-) create mode 100644 numpy/_core/_add_newdocs.pyi create mode 100644 numpy/_core/_add_newdocs_scalars.pyi create mode 100644 numpy/_core/_dtype.pyi create mode 100644 numpy/_core/_dtype_ctypes.pyi create mode 100644 numpy/_core/_exceptions.pyi create mode 100644 numpy/_core/_machar.pyi create mode 100644 numpy/_core/_methods.pyi create mode 100644 numpy/_core/_simd.pyi create mode 100644 numpy/_core/_string_helpers.pyi create mode 100644 numpy/_core/overrides.pyi create mode 100644 numpy/_core/printoptions.pyi create mode 100644 numpy/_pyinstaller/__init__.pyi create mode 100644 numpy/_pyinstaller/hook-numpy.pyi create mode 100644 numpy/_utils/__init__.pyi create mode 100644 numpy/_utils/_convertions.pyi create mode 100644 numpy/_utils/_inspect.pyi create mode 100644 numpy/_utils/_pep440.pyi create mode 100644 numpy/core/_dtype.pyi create mode 100644 numpy/core/_dtype_ctypes.pyi create mode 100644 numpy/core/overrides.pyi create mode 100644 numpy/fft/helper.pyi create mode 100644 numpy/linalg/_umath_linalg.pyi create mode 100644 
numpy/linalg/lapack_lite.pyi create mode 100644 numpy/linalg/linalg.pyi diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 7f6bd9db55a4..527f71d897ee 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -574,6 +574,7 @@ from numpy.lib._polynomial_impl import ( from numpy.lib._shape_base_impl import ( column_stack, + row_stack, dstack, array_split, split, @@ -730,10 +731,9 @@ __all__ = [ # noqa: RUF022 "histogram2d", "mask_indices", "tril_indices", "tril_indices_from", "triu_indices", "triu_indices_from", # lib._shape_base_impl.__all__ - # NOTE: `row_stack` is omitted because it is deprecated "column_stack", "dstack", "array_split", "split", "hsplit", "vsplit", "dsplit", "apply_over_axes", "expand_dims", "apply_along_axis", "kron", "tile", - "take_along_axis", "put_along_axis", + "take_along_axis", "put_along_axis", "row_stack", # lib._type_check_impl.__all__ "iscomplexobj", "isrealobj", "imag", "iscomplex", "isreal", "nan_to_num", "real", "real_if_close", "typename", "mintypecode", "common_type", @@ -4803,6 +4803,17 @@ class ufunc: # outputs, so we can't type it very precisely. def at(self, /, *args: Any, **kwargs: Any) -> None: ... + # + def resolve_dtypes( + self, + /, + dtypes: tuple[dtype[Any] | type | None, ...], + *, + signature: tuple[dtype[Any] | None, ...] | None = None, + casting: _CastingKind | None = None, + reduction: builtins.bool = False, + ) -> tuple[dtype[Any], ...]: ... + # Parameters: `__name__`, `ntypes` and `identity` absolute: _UFunc_Nin1_Nout1[L['absolute'], L[20], None] add: _UFunc_Nin2_Nout1[L['add'], L[22], L[0]] diff --git a/numpy/_core/_add_newdocs.pyi b/numpy/_core/_add_newdocs.pyi new file mode 100644 index 000000000000..b23c3b1adedd --- /dev/null +++ b/numpy/_core/_add_newdocs.pyi @@ -0,0 +1,3 @@ +from .overrides import get_array_function_like_doc as get_array_function_like_doc + +def refer_to_array_attribute(attr: str, method: bool = True) -> tuple[str, str]: ... 
diff --git a/numpy/_core/_add_newdocs_scalars.pyi b/numpy/_core/_add_newdocs_scalars.pyi new file mode 100644 index 000000000000..4a06c9b07d74 --- /dev/null +++ b/numpy/_core/_add_newdocs_scalars.pyi @@ -0,0 +1,16 @@ +from collections.abc import Iterable +from typing import Final + +import numpy as np + +possible_aliases: Final[list[tuple[type[np.number], str, str]]] = ... +_system: Final[str] = ... +_machine: Final[str] = ... +_doc_alias_string: Final[str] = ... +_bool_docstring: Final[str] = ... +int_name: str = ... +float_name: str = ... + +def numeric_type_aliases(aliases: list[tuple[str, str]]) -> list[tuple[type[np.number], str, str]]: ... +def add_newdoc_for_scalar_type(obj: str, fixed_aliases: Iterable[str], doc: str) -> None: ... +def _get_platform_and_machine() -> tuple[str, str]: ... diff --git a/numpy/_core/_dtype.pyi b/numpy/_core/_dtype.pyi new file mode 100644 index 000000000000..c3e966e3f517 --- /dev/null +++ b/numpy/_core/_dtype.pyi @@ -0,0 +1,58 @@ +from typing import Any, Final, TypeAlias, TypedDict, overload, type_check_only +from typing import Literal as L + +from typing_extensions import ReadOnly, TypeVar + +import numpy as np + +### + +_T = TypeVar("_T") + +_Name: TypeAlias = L["uint", "int", "complex", "float", "bool", "void", "object", "datetime", "timedelta", "bytes", "str"] + +@type_check_only +class _KindToStemType(TypedDict): + u: ReadOnly[L["uint"]] + i: ReadOnly[L["int"]] + c: ReadOnly[L["complex"]] + f: ReadOnly[L["float"]] + b: ReadOnly[L["bool"]] + V: ReadOnly[L["void"]] + O: ReadOnly[L["object"]] + M: ReadOnly[L["datetime"]] + m: ReadOnly[L["timedelta"]] + S: ReadOnly[L["bytes"]] + U: ReadOnly[L["str"]] + +### + +_kind_to_stem: Final[_KindToStemType] = ... + +# +def _kind_name(dtype: np.dtype[Any]) -> _Name: ... +def __str__(dtype: np.dtype[Any]) -> str: ... +def __repr__(dtype: np.dtype[Any]) -> str: ... + +# +def _isunsized(dtype: np.dtype[Any]) -> bool: ... +def _is_packed(dtype: np.dtype[Any]) -> bool: ... 
+def _name_includes_bit_suffix(dtype: np.dtype[Any]) -> bool: ... + +# +def _construction_repr(dtype: np.dtype[Any], include_align: bool = False, short: bool = False) -> str: ... +def _scalar_str(dtype: np.dtype[Any], short: bool) -> str: ... +def _byte_order_str(dtype: np.dtype[Any]) -> str: ... +def _datetime_metadata_str(dtype: np.dtype[Any]) -> str: ... +def _struct_dict_str(dtype: np.dtype[Any], includealignedflag: bool) -> str: ... +def _struct_list_str(dtype: np.dtype[Any]) -> str: ... +def _struct_str(dtype: np.dtype[Any], include_align: bool) -> str: ... +def _subarray_str(dtype: np.dtype[Any]) -> str: ... +def _name_get(dtype: np.dtype[Any]) -> str: ... + +# +@overload +def _unpack_field(dtype: np.dtype[Any], offset: int, title: _T) -> tuple[np.dtype[Any], int, _T]: ... +@overload +def _unpack_field(dtype: np.dtype[Any], offset: int, title: None = None) -> tuple[np.dtype[Any], int, None]: ... +def _aligned_offset(offset: int, alignment: int) -> int: ... diff --git a/numpy/_core/_dtype_ctypes.pyi b/numpy/_core/_dtype_ctypes.pyi new file mode 100644 index 000000000000..69438a2c1b4c --- /dev/null +++ b/numpy/_core/_dtype_ctypes.pyi @@ -0,0 +1,83 @@ +import _ctypes +import ctypes as ct +from typing import Any, overload + +import numpy as np + +# +@overload +def dtype_from_ctypes_type(t: type[_ctypes.Array[Any] | _ctypes.Structure]) -> np.dtype[np.void]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_bool]) -> np.dtype[np.bool]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_int8 | ct.c_byte]) -> np.dtype[np.int8]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_uint8 | ct.c_ubyte]) -> np.dtype[np.uint8]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_int16 | ct.c_short]) -> np.dtype[np.int16]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_uint16 | ct.c_ushort]) -> np.dtype[np.uint16]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_int32 | ct.c_int]) -> np.dtype[np.int32]: ... 
+@overload +def dtype_from_ctypes_type(t: type[ct.c_uint32 | ct.c_uint]) -> np.dtype[np.uint32]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_ssize_t | ct.c_long]) -> np.dtype[np.int32 | np.int64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_size_t | ct.c_ulong]) -> np.dtype[np.uint32 | np.uint64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_int64 | ct.c_longlong]) -> np.dtype[np.int64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_uint64 | ct.c_ulonglong]) -> np.dtype[np.uint64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_float]) -> np.dtype[np.float32]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_double]) -> np.dtype[np.float64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_longdouble]) -> np.dtype[np.longdouble]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_char]) -> np.dtype[np.bytes_]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.py_object[Any]]) -> np.dtype[np.object_]: ... + +# NOTE: the complex ctypes on python>=3.14 are not yet supported at runtim, see +# https://github.com/numpy/numpy/issues/28360 + +# +def _from_ctypes_array(t: type[_ctypes.Array[Any]]) -> np.dtype[np.void]: ... +def _from_ctypes_structure(t: type[_ctypes.Structure]) -> np.dtype[np.void]: ... +def _from_ctypes_union(t: type[_ctypes.Union]) -> np.dtype[np.void]: ... + +# keep in sync with `dtype_from_ctypes_type` (minus the first overload) +@overload +def _from_ctypes_scalar(t: type[ct.c_bool]) -> np.dtype[np.bool]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_int8 | ct.c_byte]) -> np.dtype[np.int8]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_uint8 | ct.c_ubyte]) -> np.dtype[np.uint8]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_int16 | ct.c_short]) -> np.dtype[np.int16]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_uint16 | ct.c_ushort]) -> np.dtype[np.uint16]: ... 
+@overload +def _from_ctypes_scalar(t: type[ct.c_int32 | ct.c_int]) -> np.dtype[np.int32]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_uint32 | ct.c_uint]) -> np.dtype[np.uint32]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_ssize_t | ct.c_long]) -> np.dtype[np.int32 | np.int64]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_size_t | ct.c_ulong]) -> np.dtype[np.uint32 | np.uint64]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_int64 | ct.c_longlong]) -> np.dtype[np.int64]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_uint64 | ct.c_ulonglong]) -> np.dtype[np.uint64]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_float]) -> np.dtype[np.float32]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_double]) -> np.dtype[np.float64]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_longdouble]) -> np.dtype[np.longdouble]: ... +@overload +def _from_ctypes_scalar(t: type[ct.c_char]) -> np.dtype[np.bytes_]: ... +@overload +def _from_ctypes_scalar(t: type[ct.py_object[Any]]) -> np.dtype[np.object_]: ... diff --git a/numpy/_core/_exceptions.pyi b/numpy/_core/_exceptions.pyi new file mode 100644 index 000000000000..5abfc779c212 --- /dev/null +++ b/numpy/_core/_exceptions.pyi @@ -0,0 +1,73 @@ +from collections.abc import Iterable +from typing import Any, Final, overload + +from typing_extensions import TypeVar, Unpack + +import numpy as np +from numpy import _CastingKind +from numpy._utils import set_module as set_module + +### + +_T = TypeVar("_T") +_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, Unpack[tuple[Any, ...]]]) +_ExceptionT = TypeVar("_ExceptionT", bound=Exception) + +### + +class UFuncTypeError(TypeError): + ufunc: Final[np.ufunc] + def __init__(self, /, ufunc: np.ufunc) -> None: ... + +class _UFuncNoLoopError(UFuncTypeError): + dtypes: tuple[np.dtype[Any], ...] + def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype[Any]]) -> None: ... 
+ +class _UFuncBinaryResolutionError(_UFuncNoLoopError): + dtypes: tuple[np.dtype[Any], np.dtype[Any]] + def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype[Any]]) -> None: ... + +class _UFuncCastingError(UFuncTypeError): + casting: Final[_CastingKind] + from_: Final[np.dtype[Any]] + to: Final[np.dtype[Any]] + def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype[Any], to: np.dtype[Any]) -> None: ... + +class _UFuncInputCastingError(_UFuncCastingError): + in_i: Final[int] + def __init__( + self, + /, + ufunc: np.ufunc, + casting: _CastingKind, + from_: np.dtype[Any], + to: np.dtype[Any], + i: int, + ) -> None: ... + +class _UFuncOutputCastingError(_UFuncCastingError): + out_i: Final[int] + def __init__( + self, + /, + ufunc: np.ufunc, + casting: _CastingKind, + from_: np.dtype[Any], + to: np.dtype[Any], + i: int, + ) -> None: ... + +class _ArrayMemoryError(MemoryError): + shape: tuple[int, ...] + dtype: np.dtype[Any] + def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype[Any]) -> None: ... + @property + def _total_size(self) -> int: ... + @staticmethod + def _size_to_string(num_bytes: int) -> str: ... + +@overload +def _unpack_tuple(tup: tuple[_T]) -> _T: ... +@overload +def _unpack_tuple(tup: _TupleT) -> _TupleT: ... +def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ... 
diff --git a/numpy/_core/_machar.pyi b/numpy/_core/_machar.pyi new file mode 100644 index 000000000000..5abfc779c212 --- /dev/null +++ b/numpy/_core/_machar.pyi @@ -0,0 +1,73 @@ +from collections.abc import Iterable +from typing import Any, Final, overload + +from typing_extensions import TypeVar, Unpack + +import numpy as np +from numpy import _CastingKind +from numpy._utils import set_module as set_module + +### + +_T = TypeVar("_T") +_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, Unpack[tuple[Any, ...]]]) +_ExceptionT = TypeVar("_ExceptionT", bound=Exception) + +### + +class UFuncTypeError(TypeError): + ufunc: Final[np.ufunc] + def __init__(self, /, ufunc: np.ufunc) -> None: ... + +class _UFuncNoLoopError(UFuncTypeError): + dtypes: tuple[np.dtype[Any], ...] + def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype[Any]]) -> None: ... + +class _UFuncBinaryResolutionError(_UFuncNoLoopError): + dtypes: tuple[np.dtype[Any], np.dtype[Any]] + def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype[Any]]) -> None: ... + +class _UFuncCastingError(UFuncTypeError): + casting: Final[_CastingKind] + from_: Final[np.dtype[Any]] + to: Final[np.dtype[Any]] + def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype[Any], to: np.dtype[Any]) -> None: ... + +class _UFuncInputCastingError(_UFuncCastingError): + in_i: Final[int] + def __init__( + self, + /, + ufunc: np.ufunc, + casting: _CastingKind, + from_: np.dtype[Any], + to: np.dtype[Any], + i: int, + ) -> None: ... + +class _UFuncOutputCastingError(_UFuncCastingError): + out_i: Final[int] + def __init__( + self, + /, + ufunc: np.ufunc, + casting: _CastingKind, + from_: np.dtype[Any], + to: np.dtype[Any], + i: int, + ) -> None: ... + +class _ArrayMemoryError(MemoryError): + shape: tuple[int, ...] + dtype: np.dtype[Any] + def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype[Any]) -> None: ... + @property + def _total_size(self) -> int: ... 
+ @staticmethod + def _size_to_string(num_bytes: int) -> str: ... + +@overload +def _unpack_tuple(tup: tuple[_T]) -> _T: ... +@overload +def _unpack_tuple(tup: _TupleT) -> _TupleT: ... +def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ... diff --git a/numpy/_core/_methods.pyi b/numpy/_core/_methods.pyi new file mode 100644 index 000000000000..45e2b8b9f761 --- /dev/null +++ b/numpy/_core/_methods.pyi @@ -0,0 +1,24 @@ +from collections.abc import Callable +from typing import Any, TypeAlias + +from typing_extensions import Concatenate + +import numpy as np + +from . import _exceptions as _exceptions + +### + +_Reduce2: TypeAlias = Callable[Concatenate[object, ...], Any] + +### + +bool_dt: np.dtype[np.bool] = ... +umr_maximum: _Reduce2 = ... +umr_minimum: _Reduce2 = ... +umr_sum: _Reduce2 = ... +umr_prod: _Reduce2 = ... +umr_bitwise_count = np.bitwise_count +umr_any: _Reduce2 = ... +umr_all: _Reduce2 = ... +_complex_to_float: dict[np.dtype[np.complexfloating], np.dtype[np.floating]] = ... diff --git a/numpy/_core/_simd.pyi b/numpy/_core/_simd.pyi new file mode 100644 index 000000000000..70bb7077797e --- /dev/null +++ b/numpy/_core/_simd.pyi @@ -0,0 +1,25 @@ +from types import ModuleType +from typing import TypedDict, type_check_only + +# NOTE: these 5 are only defined on systems with an intel processor +SSE42: ModuleType | None = ... +FMA3: ModuleType | None = ... +AVX2: ModuleType | None = ... +AVX512F: ModuleType | None = ... +AVX512_SKX: ModuleType | None = ... + +baseline: ModuleType | None = ... + +@type_check_only +class SimdTargets(TypedDict): + SSE42: ModuleType | None + AVX2: ModuleType | None + FMA3: ModuleType | None + AVX512F: ModuleType | None + AVX512_SKX: ModuleType | None + baseline: ModuleType | None + +targets: SimdTargets = ... + +def clear_floatstatus() -> None: ... +def get_floatstatus() -> int: ... 
diff --git a/numpy/_core/_string_helpers.pyi b/numpy/_core/_string_helpers.pyi new file mode 100644 index 000000000000..6a85832b7a93 --- /dev/null +++ b/numpy/_core/_string_helpers.pyi @@ -0,0 +1,12 @@ +from typing import Final + +_all_chars: Final[tuple[str, ...]] = ... +_ascii_upper: Final[tuple[str, ...]] = ... +_ascii_lower: Final[tuple[str, ...]] = ... + +LOWER_TABLE: Final[tuple[str, ...]] = ... +UPPER_TABLE: Final[tuple[str, ...]] = ... + +def english_lower(s: str) -> str: ... +def english_upper(s: str) -> str: ... +def english_capitalize(s: str) -> str: ... diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index f7207db17b0c..3de05f3db362 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -108,7 +108,6 @@ __all__ = [ _SCT = TypeVar("_SCT", bound=generic) _SCT_uifcO = TypeVar("_SCT_uifcO", bound=number[Any] | object_) _ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) -_SizeType = TypeVar("_SizeType", bound=int) _ShapeType = TypeVar("_ShapeType", bound=tuple[int, ...]) _ShapeType_co = TypeVar("_ShapeType_co", bound=tuple[int, ...], covariant=True) @@ -456,23 +455,19 @@ def searchsorted( sorter: _ArrayLikeInt_co | None = ..., # 1D int array ) -> NDArray[intp]: ... -# unlike `reshape`, `resize` only accepts positive integers, so literal ints can be used +# @overload -def resize(a: _ArrayLike[_SCT], new_shape: _SizeType) -> np.ndarray[tuple[_SizeType], np.dtype[_SCT]]: ... +def resize(a: _ArrayLike[_SCT], new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype[_SCT]]: ... @overload -def resize(a: _ArrayLike[_SCT], new_shape: SupportsIndex) -> np.ndarray[tuple[int], np.dtype[_SCT]]: ... +def resize(a: _ArrayLike[_SCT], new_shape: _AnyShapeType) -> np.ndarray[_AnyShapeType, np.dtype[_SCT]]: ... @overload -def resize(a: _ArrayLike[_SCT], new_shape: _ShapeType) -> np.ndarray[_ShapeType, np.dtype[_SCT]]: ... +def resize(a: _ArrayLike[_SCT], new_shape: _ShapeLike) -> NDArray[_SCT]: ... 
@overload -def resize(a: _ArrayLike[_SCT], new_shape: Sequence[SupportsIndex]) -> NDArray[_SCT]: ... +def resize(a: ArrayLike, new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype[Any]]: ... @overload -def resize(a: ArrayLike, new_shape: _SizeType) -> np.ndarray[tuple[_SizeType], np.dtype[Any]]: ... +def resize(a: ArrayLike, new_shape: _AnyShapeType) -> np.ndarray[_AnyShapeType, np.dtype[Any]]: ... @overload -def resize(a: ArrayLike, new_shape: SupportsIndex) -> np.ndarray[tuple[int], np.dtype[Any]]: ... -@overload -def resize(a: ArrayLike, new_shape: _ShapeType) -> np.ndarray[_ShapeType, np.dtype[Any]]: ... -@overload -def resize(a: ArrayLike, new_shape: Sequence[SupportsIndex]) -> NDArray[Any]: ... +def resize(a: ArrayLike, new_shape: _ShapeLike) -> NDArray[Any]: ... @overload def squeeze( diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index ec40c290f59a..6b6fbd3490ae 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -1290,17 +1290,26 @@ python_sources = [ '__init__.py', '__init__.pyi', '_add_newdocs.py', + '_add_newdocs.pyi', '_add_newdocs_scalars.py', + '_add_newdocs_scalars.pyi', '_asarray.py', '_asarray.pyi', '_dtype.py', + '_dtype.pyi', '_dtype_ctypes.py', + '_dtype_ctypes.pyi', '_exceptions.py', + '_exceptions.pyi', '_internal.py', '_internal.pyi', '_machar.py', + '_machar.pyi', '_methods.py', + '_methods.pyi', + '_simd.pyi', '_string_helpers.py', + '_string_helpers.pyi', '_type_aliases.py', '_type_aliases.pyi', '_ufunc_config.py', @@ -1327,7 +1336,9 @@ python_sources = [ 'numerictypes.py', 'numerictypes.pyi', 'overrides.py', + 'overrides.pyi', 'printoptions.py', + 'printoptions.pyi', 'records.py', 'records.pyi', 'shape_base.py', diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 28cf5411645f..b656472dfec7 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -49,6 +49,7 @@ from numpy import ( # type: ignore[attr-defined] signedinteger, floating, 
complexfloating, + _AnyShapeType, _OrderKACF, _OrderCF, _CastingKind, @@ -191,8 +192,6 @@ __all__ = [ "zeros", ] -_T_co = TypeVar("_T_co", covariant=True) -_T_contra = TypeVar("_T_contra", contravariant=True) _SCT = TypeVar("_SCT", bound=generic) _DType = TypeVar("_DType", bound=np.dtype[Any]) _ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any]) @@ -206,10 +205,9 @@ _IDType = TypeVar("_IDType") _Nin = TypeVar("_Nin", bound=int) _Nout = TypeVar("_Nout", bound=int) -_SizeType = TypeVar("_SizeType", bound=int) -_ShapeType = TypeVar("_ShapeType", bound=tuple[int, ...]) -_1DArray: TypeAlias = ndarray[tuple[_SizeType], dtype[_SCT]] -_Array: TypeAlias = ndarray[_ShapeType, dtype[_SCT]] +_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) +_Array: TypeAlias = ndarray[_ShapeT, dtype[_SCT]] +_Array1D: TypeAlias = ndarray[tuple[int], dtype[_SCT]] # Valid time units _UnitKind: TypeAlias = L[ @@ -250,70 +248,78 @@ class _ConstructorEmpty(Protocol): # 1-D shape @overload def __call__( - self, /, - shape: _SizeType, + self, + /, + shape: SupportsIndex, dtype: None = ..., order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], - ) -> _Array[tuple[_SizeType], float64]: ... + ) -> _Array1D[float64]: ... @overload def __call__( - self, /, - shape: _SizeType, + self, + /, + shape: SupportsIndex, dtype: _DType | _SupportsDType[_DType], order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], - ) -> ndarray[tuple[_SizeType], _DType]: ... + ) -> ndarray[tuple[int], _DType]: ... @overload def __call__( - self, /, - shape: _SizeType, + self, + /, + shape: SupportsIndex, dtype: type[_SCT], order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], - ) -> _Array[tuple[_SizeType], _SCT]: ... + ) -> _Array1D[_SCT]: ... @overload def __call__( - self, /, - shape: _SizeType, + self, + /, + shape: SupportsIndex, dtype: DTypeLike, order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], - ) -> _Array[tuple[_SizeType], Any]: ... + ) -> _Array1D[Any]: ... 
# known shape @overload def __call__( - self, /, - shape: _ShapeType, + self, + /, + shape: _AnyShapeType, dtype: None = ..., order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], - ) -> _Array[_ShapeType, float64]: ... + ) -> _Array[_AnyShapeType, float64]: ... @overload def __call__( - self, /, - shape: _ShapeType, + self, + /, + shape: _AnyShapeType, dtype: _DType | _SupportsDType[_DType], order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], - ) -> ndarray[_ShapeType, _DType]: ... + ) -> ndarray[_AnyShapeType, _DType]: ... @overload def __call__( - self, /, - shape: _ShapeType, + self, + /, + shape: _AnyShapeType, dtype: type[_SCT], order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], - ) -> _Array[_ShapeType, _SCT]: ... + ) -> _Array[_AnyShapeType, _SCT]: ... @overload def __call__( - self, /, - shape: _ShapeType, + self, + /, + shape: _AnyShapeType, dtype: DTypeLike, order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], - ) -> _Array[_ShapeType, Any]: ... + ) -> _Array[_AnyShapeType, Any]: ... # unknown shape @overload @@ -1036,7 +1042,7 @@ def arange( # type: ignore[misc] dtype: None = ..., device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> _1DArray[int, signedinteger[Any]]: ... +) -> _Array1D[signedinteger]: ... @overload def arange( # type: ignore[misc] start: _IntLike_co, @@ -1046,7 +1052,7 @@ def arange( # type: ignore[misc] *, device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> _1DArray[int, signedinteger[Any]]: ... +) -> _Array1D[signedinteger]: ... @overload def arange( # type: ignore[misc] stop: _FloatLike_co, @@ -1054,7 +1060,7 @@ def arange( # type: ignore[misc] dtype: None = ..., device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> _1DArray[int, floating[Any]]: ... +) -> _Array1D[floating]: ... 
@overload def arange( # type: ignore[misc] start: _FloatLike_co, @@ -1064,7 +1070,7 @@ def arange( # type: ignore[misc] *, device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> _1DArray[int, floating[Any]]: ... +) -> _Array1D[floating]: ... @overload def arange( stop: _TD64Like_co, @@ -1072,7 +1078,7 @@ def arange( dtype: None = ..., device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> _1DArray[int, timedelta64]: ... +) -> _Array1D[timedelta64]: ... @overload def arange( start: _TD64Like_co, @@ -1082,7 +1088,7 @@ def arange( *, device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> _1DArray[int, timedelta64]: ... +) -> _Array1D[timedelta64]: ... @overload def arange( # both start and stop must always be specified for datetime64 start: datetime64, @@ -1092,7 +1098,7 @@ def arange( # both start and stop must always be specified for datetime64 *, device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> _1DArray[int, datetime64]: ... +) -> _Array1D[datetime64]: ... @overload def arange( stop: Any, @@ -1100,7 +1106,7 @@ def arange( dtype: _DTypeLike[_SCT], device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> _1DArray[int, _SCT]: ... +) -> _Array1D[_SCT]: ... @overload def arange( start: Any, @@ -1110,7 +1116,7 @@ def arange( *, device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> _1DArray[int, _SCT]: ... +) -> _Array1D[_SCT]: ... @overload def arange( stop: Any, /, @@ -1118,7 +1124,7 @@ def arange( dtype: DTypeLike, device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> _1DArray[int, Any]: ... +) -> _Array1D[Any]: ... @overload def arange( start: Any, @@ -1128,7 +1134,7 @@ def arange( *, device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., -) -> _1DArray[int, Any]: ... +) -> _Array1D[Any]: ... 
def datetime_data( dtype: str | _DTypeLike[datetime64] | _DTypeLike[timedelta64], /, diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index 7966d9ac118b..a5af4d372968 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -44,6 +44,7 @@ from numpy import ( float64, timedelta64, object_, + _AnyShapeType, _OrderKACF, _OrderCF, ) @@ -190,7 +191,6 @@ _T = TypeVar("_T") _SCT = TypeVar("_SCT", bound=generic) _DType = TypeVar("_DType", bound=np.dtype[Any]) _ArrayType = TypeVar("_ArrayType", bound=np.ndarray[Any, Any]) -_SizeType = TypeVar("_SizeType", bound=int) _ShapeType = TypeVar("_ShapeType", bound=tuple[int, ...]) _CorrelateMode: TypeAlias = L["valid", "same", "full"] @@ -303,69 +303,69 @@ def ones_like( # 1-D shape @overload def full( - shape: _SizeType, + shape: SupportsIndex, fill_value: _SCT, dtype: None = ..., order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], -) -> _Array[tuple[_SizeType], _SCT]: ... +) -> _Array[tuple[int], _SCT]: ... @overload def full( - shape: _SizeType, + shape: SupportsIndex, fill_value: Any, dtype: _DType | _SupportsDType[_DType], order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], -) -> np.ndarray[tuple[_SizeType], _DType]: ... +) -> np.ndarray[tuple[int], _DType]: ... @overload def full( - shape: _SizeType, + shape: SupportsIndex, fill_value: Any, dtype: type[_SCT], order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], -) -> _Array[tuple[_SizeType], _SCT]: ... +) -> _Array[tuple[int], _SCT]: ... @overload def full( - shape: _SizeType, + shape: SupportsIndex, fill_value: Any, dtype: None | DTypeLike = ..., order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], -) -> _Array[tuple[_SizeType], Any]: ... +) -> _Array[tuple[int], Any]: ... # known shape @overload def full( - shape: _ShapeType, + shape: _AnyShapeType, fill_value: _SCT, dtype: None = ..., order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], -) -> _Array[_ShapeType, _SCT]: ... +) -> _Array[_AnyShapeType, _SCT]: ... 
@overload def full( - shape: _ShapeType, + shape: _AnyShapeType, fill_value: Any, dtype: _DType | _SupportsDType[_DType], order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], -) -> np.ndarray[_ShapeType, _DType]: ... +) -> np.ndarray[_AnyShapeType, _DType]: ... @overload def full( - shape: _ShapeType, + shape: _AnyShapeType, fill_value: Any, dtype: type[_SCT], order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], -) -> _Array[_ShapeType, _SCT]: ... +) -> _Array[_AnyShapeType, _SCT]: ... @overload def full( - shape: _ShapeType, + shape: _AnyShapeType, fill_value: Any, dtype: None | DTypeLike = ..., order: _OrderCF = ..., **kwargs: Unpack[_KwargsEmpty], -) -> _Array[_ShapeType, Any]: ... +) -> _Array[_AnyShapeType, Any]: ... # unknown shape @overload def full( diff --git a/numpy/_core/overrides.pyi b/numpy/_core/overrides.pyi new file mode 100644 index 000000000000..9babbcc26a0b --- /dev/null +++ b/numpy/_core/overrides.pyi @@ -0,0 +1,50 @@ +from collections.abc import Callable, Iterable +from typing import Any, Final, NamedTuple + +from typing_extensions import ParamSpec, TypeVar + +from numpy._typing import _SupportsArrayFunc + +_T = TypeVar("_T") +_Tss = ParamSpec("_Tss") +_FuncT = TypeVar("_FuncT", bound=Callable[..., object]) + +### + +ARRAY_FUNCTIONS: set[Callable[..., Any]] = ... +array_function_like_doc: Final[str] = ... + +class ArgSpec(NamedTuple): + args: list[str] + varargs: str | None + keywords: str | None + defaults: tuple[Any, ...] + +def get_array_function_like_doc(public_api: Callable[..., Any], docstring_template: str = "") -> str: ... +def finalize_array_function_like(public_api: _FuncT) -> _FuncT: ... + +# +def verify_matching_signatures( + implementation: Callable[_Tss, object], + dispatcher: Callable[_Tss, Iterable[_SupportsArrayFunc]], +) -> None: ... + +# NOTE: This actually returns a `_ArrayFunctionDispatcher` callable wrapper object, with +# the original wrapped callable stored in the `._implementation` attribute. 
It checks +# for any `__array_function__` of the values of specific arguments that the dispatcher +# specifies. Since the dispatcher only returns an iterable of passed array-like args, +# this overridable behaviour is impossible to annotate. +def array_function_dispatch( + dispatcher: Callable[_Tss, Iterable[_SupportsArrayFunc]] | None = None, + module: str | None = None, + verify: bool = True, + docs_from_dispatcher: bool = False, +) -> Callable[[_FuncT], _FuncT]: ... + +# +def array_function_from_dispatcher( + implementation: Callable[_Tss, _T], + module: str | None = None, + verify: bool = True, + docs_from_dispatcher: bool = True, +) -> Callable[[Callable[_Tss, Iterable[_SupportsArrayFunc]]], Callable[_Tss, _T]]: ... diff --git a/numpy/_core/printoptions.pyi b/numpy/_core/printoptions.pyi new file mode 100644 index 000000000000..bd7c7b40692d --- /dev/null +++ b/numpy/_core/printoptions.pyi @@ -0,0 +1,28 @@ +from collections.abc import Callable +from contextvars import ContextVar +from typing import Any, Final, TypedDict + +from .arrayprint import _FormatDict + +__all__ = ["format_options"] + +### + +class _FormatOptionsDict(TypedDict): + edgeitems: int + threshold: int + floatmode: str + precision: int + suppress: bool + linewidth: int + nanstr: str + infstr: str + sign: str + formatter: _FormatDict | None + legacy: int + override_repr: Callable[[Any], str] | None + +### + +default_format_options_dict: Final[_FormatOptionsDict] = ... 
+format_options: ContextVar[_FormatOptionsDict] diff --git a/numpy/_pyinstaller/__init__.pyi b/numpy/_pyinstaller/__init__.pyi new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/numpy/_pyinstaller/hook-numpy.pyi b/numpy/_pyinstaller/hook-numpy.pyi new file mode 100644 index 000000000000..2642996dad7e --- /dev/null +++ b/numpy/_pyinstaller/hook-numpy.pyi @@ -0,0 +1,13 @@ +from typing import Final + +# from `PyInstaller.compat` +is_conda: Final[bool] +is_pure_conda: Final[bool] + +# from `PyInstaller.utils.hooks` +def is_module_satisfies(requirements: str, version: None = None, version_attr: None = None) -> bool: ... + +binaries: Final[list[tuple[str, str]]] + +hiddenimports: Final[list[str]] +excludedimports: Final[list[str]] diff --git a/numpy/_utils/__init__.pyi b/numpy/_utils/__init__.pyi new file mode 100644 index 000000000000..ced45bfdeb44 --- /dev/null +++ b/numpy/_utils/__init__.pyi @@ -0,0 +1,31 @@ +from collections.abc import Callable, Iterable +from typing import Protocol, overload, type_check_only + +from _typeshed import IdentityFunction +from typing_extensions import TypeVar + +from ._convertions import asbytes as asbytes +from ._convertions import asunicode as asunicode + +### + +_T = TypeVar("_T") +_HasModuleT = TypeVar("_HasModuleT", bound=_HasModule) + +@type_check_only +class _HasModule(Protocol): + __module__: str + +### + +@overload +def set_module(module: None) -> IdentityFunction: ... +@overload +def set_module(module: _HasModuleT) -> _HasModuleT: ... + +# +def _rename_parameter( + old_names: Iterable[str], + new_names: Iterable[str], + dep_version: str | None = None, +) -> Callable[[Callable[..., _T]], Callable[..., _T]]: ... diff --git a/numpy/_utils/_convertions.pyi b/numpy/_utils/_convertions.pyi new file mode 100644 index 000000000000..6cc599acc94f --- /dev/null +++ b/numpy/_utils/_convertions.pyi @@ -0,0 +1,4 @@ +__all__ = ["asbytes", "asunicode"] + +def asunicode(s: bytes | str) -> str: ... 
+def asbytes(s: bytes | str) -> str: ... diff --git a/numpy/_utils/_inspect.pyi b/numpy/_utils/_inspect.pyi new file mode 100644 index 000000000000..ba0260d3a593 --- /dev/null +++ b/numpy/_utils/_inspect.pyi @@ -0,0 +1,71 @@ +import types +from collections.abc import Callable, Mapping +from typing import Any, Final, TypeAlias, overload + +from _typeshed import SupportsLenAndGetItem +from typing_extensions import TypeIs, TypeVar + +__all__ = ["formatargspec", "getargspec"] + +### + +_T = TypeVar("_T") +_RT = TypeVar("_RT") + +_StrSeq: TypeAlias = SupportsLenAndGetItem[str] +_NestedSeq: TypeAlias = list[_T | _NestedSeq[_T]] | tuple[_T | _NestedSeq[_T], ...] + +_JoinFunc: TypeAlias = Callable[[list[_T]], _T] +_FormatFunc: TypeAlias = Callable[[_T], str] + +### + +CO_OPTIMIZED: Final = 1 +CO_NEWLOCALS: Final = 2 +CO_VARARGS: Final = 4 +CO_VARKEYWORDS: Final = 8 + +### + +def ismethod(object: object) -> TypeIs[types.MethodType]: ... +def isfunction(object: object) -> TypeIs[types.FunctionType]: ... +def iscode(object: object) -> TypeIs[types.CodeType]: ... + +### + +def getargs(co: types.CodeType) -> tuple[list[str], str | None, str | None]: ... +def getargspec(func: types.MethodType | types.FunctionType) -> tuple[list[str], str | None, str | None, tuple[Any, ...]]: ... +def getargvalues(frame: types.FrameType) -> tuple[list[str], str | None, str | None, dict[str, Any]]: ... + +# +def joinseq(seq: _StrSeq) -> str: ... + +# +@overload +def strseq(object: _NestedSeq[str], convert: Callable[[Any], Any], join: _JoinFunc[str] = ...) -> str: ... +@overload +def strseq(object: _NestedSeq[_T], convert: Callable[[_T], _RT], join: _JoinFunc[_RT]) -> _RT: ... 
+ +# +def formatargspec( + args: _StrSeq, + varargs: str | None = None, + varkw: str | None = None, + defaults: SupportsLenAndGetItem[object] | None = None, + formatarg: _FormatFunc[str] = ..., # str + formatvarargs: _FormatFunc[str] = ..., # "*{}".format + formatvarkw: _FormatFunc[str] = ..., # "**{}".format + formatvalue: _FormatFunc[object] = ..., # "={!r}".format + join: _JoinFunc[str] = ..., # joinseq +) -> str: ... +def formatargvalues( + args: _StrSeq, + varargs: str | None, + varkw: str | None, + locals: Mapping[str, object] | None, + formatarg: _FormatFunc[str] = ..., # str + formatvarargs: _FormatFunc[str] = ..., # "*{}".format + formatvarkw: _FormatFunc[str] = ..., # "**{}".format + formatvalue: _FormatFunc[object] = ..., # "={!r}".format + join: _JoinFunc[str] = ..., # joinseq +) -> str: ... diff --git a/numpy/_utils/_pep440.pyi b/numpy/_utils/_pep440.pyi new file mode 100644 index 000000000000..29dd4c912aa9 --- /dev/null +++ b/numpy/_utils/_pep440.pyi @@ -0,0 +1,121 @@ +import re +from collections.abc import Callable +from typing import ( + Any, + ClassVar, + Final, + Generic, + NamedTuple, + TypeVar, + final, + type_check_only, +) +from typing import ( + Literal as L, +) + +from typing_extensions import TypeIs + +__all__ = ["VERSION_PATTERN", "InvalidVersion", "LegacyVersion", "Version", "parse"] + +### + +_CmpKeyT = TypeVar("_CmpKeyT", bound=tuple[object, ...]) +_CmpKeyT_co = TypeVar("_CmpKeyT_co", bound=tuple[object, ...], default=tuple[Any, ...], covariant=True) + +### + +VERSION_PATTERN: Final[str] = ... + +class InvalidVersion(ValueError): ... + +@type_check_only +@final +class _InfinityType: + def __hash__(self) -> int: ... + def __eq__(self, other: object, /) -> TypeIs[_InfinityType]: ... + def __ne__(self, other: object, /) -> bool: ... + def __lt__(self, other: object, /) -> L[False]: ... + def __le__(self, other: object, /) -> L[False]: ... + def __gt__(self, other: object, /) -> L[True]: ... 
+ def __ge__(self, other: object, /) -> L[True]: ... + def __neg__(self) -> _NegativeInfinityType: ... + +Infinity: Final[_InfinityType] = ... + +@type_check_only +@final +class _NegativeInfinityType: + def __hash__(self) -> int: ... + def __eq__(self, other: object, /) -> TypeIs[_NegativeInfinityType]: ... + def __ne__(self, other: object, /) -> bool: ... + def __lt__(self, other: object, /) -> L[True]: ... + def __le__(self, other: object, /) -> L[True]: ... + def __gt__(self, other: object, /) -> L[False]: ... + def __ge__(self, other: object, /) -> L[False]: ... + def __neg__(self) -> _InfinityType: ... + +NegativeInfinity: Final[_NegativeInfinityType] = ... + +class _Version(NamedTuple): + epoch: int + release: tuple[int, ...] + dev: tuple[str, int] | None + pre: tuple[str, int] | None + post: tuple[str, int] | None + local: tuple[str | int, ...] | None + +class _BaseVersion(Generic[_CmpKeyT_co]): + _key: _CmpKeyT_co + def __hash__(self) -> int: ... + def __eq__(self, other: _BaseVersion, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __ne__(self, other: _BaseVersion, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __lt__(self, other: _BaseVersion, /) -> bool: ... + def __le__(self, other: _BaseVersion, /) -> bool: ... + def __ge__(self, other: _BaseVersion, /) -> bool: ... + def __gt__(self, other: _BaseVersion, /) -> bool: ... + def _compare(self, /, other: _BaseVersion[_CmpKeyT], method: Callable[[_CmpKeyT_co, _CmpKeyT], bool]) -> bool: ... + +class LegacyVersion(_BaseVersion[tuple[L[-1], tuple[str, ...]]]): + _version: Final[str] + def __init__(self, /, version: str) -> None: ... + @property + def public(self) -> str: ... + @property + def base_version(self) -> str: ... + @property + def local(self) -> None: ... + @property + def is_prerelease(self) -> L[False]: ... + @property + def is_postrelease(self) -> L[False]: ... 
+ +class Version( + _BaseVersion[ + tuple[ + int, # epoch + tuple[int, ...], # release + tuple[str, int] | _InfinityType | _NegativeInfinityType, # pre + tuple[str, int] | _NegativeInfinityType, # post + tuple[str, int] | _InfinityType, # dev + tuple[tuple[int, L[""]] | tuple[_NegativeInfinityType, str], ...] | _NegativeInfinityType, # local + ], + ], +): + _regex: ClassVar[re.Pattern[str]] = ... + _version: Final[str] + + def __init__(self, /, version: str) -> None: ... + @property + def public(self) -> str: ... + @property + def base_version(self) -> str: ... + @property + def local(self) -> str | None: ... + @property + def is_prerelease(self) -> bool: ... + @property + def is_postrelease(self) -> bool: ... + +# +def parse(version: str) -> Version | LegacyVersion: ... diff --git a/numpy/core/_dtype.pyi b/numpy/core/_dtype.pyi new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/numpy/core/_dtype_ctypes.pyi b/numpy/core/_dtype_ctypes.pyi new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/numpy/core/overrides.pyi b/numpy/core/overrides.pyi new file mode 100644 index 000000000000..fab3512626f8 --- /dev/null +++ b/numpy/core/overrides.pyi @@ -0,0 +1,7 @@ +# NOTE: At runtime, this submodule dynamically re-exports any `numpy._core.overrides` +# member, and issues a `DeprecationWarning` when accessed. But since there is no +# `__dir__` or `__all__` present, these annotations would be unverifiable. Because +# this module is also deprecated in favor of `numpy._core`, and therefore not part of +# the public API, we omit the "re-exports", which in practice would require literal +# duplication of the stubs in order for the `@deprecated` decorator to be understood +# by type-checkers. 
diff --git a/numpy/fft/_helper.pyi b/numpy/fft/_helper.pyi index 5cb28db2239e..7673c1800a92 100644 --- a/numpy/fft/_helper.pyi +++ b/numpy/fft/_helper.pyi @@ -1,51 +1,38 @@ -from typing import Any, TypeVar, overload, Literal as L +from typing import Any, Final, TypeVar, overload +from typing import Literal as L -from numpy import generic, integer, floating, complexfloating -from numpy._typing import ( - NDArray, - ArrayLike, - _ShapeLike, - _ArrayLike, - _ArrayLikeFloat_co, - _ArrayLikeComplex_co, -) +from numpy import complexfloating, floating, generic, integer +from numpy._typing import ArrayLike, NDArray, _ArrayLike, _ArrayLikeComplex_co, _ArrayLikeFloat_co, _ShapeLike -__all__ = ["fftshift", "ifftshift", "fftfreq", "rfftfreq"] +__all__ = ["fftfreq", "fftshift", "ifftshift", "rfftfreq"] _SCT = TypeVar("_SCT", bound=generic) +### + +integer_types: Final[tuple[type[int], type[integer]]] = ... + +### + @overload -def fftshift(x: _ArrayLike[_SCT], axes: None | _ShapeLike = ...) -> NDArray[_SCT]: ... +def fftshift(x: _ArrayLike[_SCT], axes: _ShapeLike | None = None) -> NDArray[_SCT]: ... @overload -def fftshift(x: ArrayLike, axes: None | _ShapeLike = ...) -> NDArray[Any]: ... +def fftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... +# @overload -def ifftshift(x: _ArrayLike[_SCT], axes: None | _ShapeLike = ...) -> NDArray[_SCT]: ... +def ifftshift(x: _ArrayLike[_SCT], axes: _ShapeLike | None = None) -> NDArray[_SCT]: ... @overload -def ifftshift(x: ArrayLike, axes: None | _ShapeLike = ...) -> NDArray[Any]: ... +def ifftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... +# @overload -def fftfreq( - n: int | integer[Any], - d: _ArrayLikeFloat_co = ..., - device: None | L["cpu"] = ..., -) -> NDArray[floating[Any]]: ... +def fftfreq(n: int | integer, d: _ArrayLikeFloat_co = 1.0, device: L["cpu"] | None = None) -> NDArray[floating]: ... 
@overload -def fftfreq( - n: int | integer[Any], - d: _ArrayLikeComplex_co = ..., - device: None | L["cpu"] = ..., -) -> NDArray[complexfloating[Any, Any]]: ... +def fftfreq(n: int | integer, d: _ArrayLikeComplex_co = 1.0, device: L["cpu"] | None = None) -> NDArray[complexfloating]: ... +# @overload -def rfftfreq( - n: int | integer[Any], - d: _ArrayLikeFloat_co = ..., - device: None | L["cpu"] = ..., -) -> NDArray[floating[Any]]: ... +def rfftfreq(n: int | integer, d: _ArrayLikeFloat_co = 1.0, device: L["cpu"] | None = None) -> NDArray[floating]: ... @overload -def rfftfreq( - n: int | integer[Any], - d: _ArrayLikeComplex_co = ..., - device: None | L["cpu"] = ..., -) -> NDArray[complexfloating[Any, Any]]: ... +def rfftfreq(n: int | integer, d: _ArrayLikeComplex_co = 1.0, device: L["cpu"] | None = None) -> NDArray[complexfloating]: ... diff --git a/numpy/fft/helper.pyi b/numpy/fft/helper.pyi new file mode 100644 index 000000000000..887cbe7e27c9 --- /dev/null +++ b/numpy/fft/helper.pyi @@ -0,0 +1,22 @@ +from typing import Any +from typing import Literal as L + +from typing_extensions import deprecated + +import numpy as np +from numpy._typing import ArrayLike, NDArray, _ShapeLike + +from ._helper import integer_types as integer_types + +__all__ = ["fftfreq", "fftshift", "ifftshift", "rfftfreq"] + +### + +@deprecated("Please use `numpy.fft.fftshift` instead.") +def fftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... +@deprecated("Please use `numpy.fft.ifftshift` instead.") +def ifftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... +@deprecated("Please use `numpy.fft.fftfreq` instead.") +def fftfreq(n: int | np.integer, d: ArrayLike = 1.0, device: L["cpu"] | None = None) -> NDArray[Any]: ... +@deprecated("Please use `numpy.fft.rfftfreq` instead.") +def rfftfreq(n: int | np.integer, d: ArrayLike = 1.0, device: L["cpu"] | None = None) -> NDArray[Any]: ... 
diff --git a/numpy/fft/meson.build b/numpy/fft/meson.build index 751b5dc74d30..e18949af5e31 100644 --- a/numpy/fft/meson.build +++ b/numpy/fft/meson.build @@ -24,6 +24,7 @@ py.install_sources( '_helper.py', '_helper.pyi', 'helper.py', + 'helper.pyi', ], subdir: 'numpy/fft' ) diff --git a/numpy/lib/_shape_base_impl.pyi b/numpy/lib/_shape_base_impl.pyi index 5439c533edff..77e5d2de9cb9 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -10,20 +10,13 @@ from typing import ( type_check_only, ) +from typing_extensions import deprecated + import numpy as np -from numpy import ( - generic, - integer, - ufunc, - unsignedinteger, - signedinteger, - floating, - complexfloating, - object_, -) -from numpy._core.shape_base import vstack as row_stack +from numpy import _CastingKind, generic, integer, ufunc, unsignedinteger, signedinteger, floating, complexfloating, object_ from numpy._typing import ( ArrayLike, + DTypeLike, NDArray, _ShapeLike, _ArrayLike, @@ -72,6 +65,8 @@ class _SupportsArrayWrap(Protocol): @property def __array_wrap__(self) -> _ArrayWrap: ... +### + def take_along_axis( arr: _SCT | NDArray[_SCT], indices: NDArray[integer[Any]], @@ -119,6 +114,16 @@ def expand_dims( axis: _ShapeLike, ) -> NDArray[Any]: ... +# Deprecated in NumPy 2.0, 2023-08-18 +@deprecated("`row_stack` alias is deprecated. Use `np.vstack` directly.") +def row_stack( + tup: Sequence[ArrayLike], + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind", +) -> NDArray[Any]: ... + +# @overload def column_stack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ... 
@overload diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index 8ac75a47f5f8..9f646ec94037 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -16,7 +16,6 @@ from numpy import ( vecdot, # other - generic, floating, complexfloating, signedinteger, @@ -79,13 +78,13 @@ __all__ = [ "vecdot", ] -_T = TypeVar("_T") _ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) -_SCT2 = TypeVar("_SCT2", bound=generic, covariant=True) -_2Tuple: TypeAlias = tuple[_T, _T] _ModeKind: TypeAlias = L["reduced", "complete", "r", "raw"] +### + +fortran_int = np.intc class EigResult(NamedTuple): eigenvalues: NDArray[Any] diff --git a/numpy/linalg/_umath_linalg.pyi b/numpy/linalg/_umath_linalg.pyi new file mode 100644 index 000000000000..cd07acdb1f9e --- /dev/null +++ b/numpy/linalg/_umath_linalg.pyi @@ -0,0 +1,61 @@ +from typing import Final +from typing import Literal as L + +import numpy as np +from numpy._typing._ufunc import _GUFunc_Nin2_Nout1 + +__version__: Final[str] = ... +_ilp64: Final[bool] = ... + +### +# 1 -> 1 + +# (m,m) -> () +det: Final[np.ufunc] = ... +# (m,m) -> (m) +cholesky_lo: Final[np.ufunc] = ... +cholesky_up: Final[np.ufunc] = ... +eigvals: Final[np.ufunc] = ... +eigvalsh_lo: Final[np.ufunc] = ... +eigvalsh_up: Final[np.ufunc] = ... +# (m,m) -> (m,m) +inv: Final[np.ufunc] = ... +# (m,n) -> (p) +qr_r_raw: Final[np.ufunc] = ... +svd: Final[np.ufunc] = ... + +### +# 1 -> 2 + +# (m,m) -> (), () +slogdet: Final[np.ufunc] = ... +# (m,m) -> (m), (m,m) +eig: Final[np.ufunc] = ... +eigh_lo: Final[np.ufunc] = ... +eigh_up: Final[np.ufunc] = ... + +### +# 2 -> 1 + +# (m,n), (n) -> (m,m) +qr_complete: Final[_GUFunc_Nin2_Nout1[L["qr_complete"], L[2], None, L["(m,n),(n)->(m,m)"]]] = ... +# (m,n), (k) -> (m,k) +qr_reduced: Final[_GUFunc_Nin2_Nout1[L["qr_reduced"], L[2], None, L["(m,n),(k)->(m,k)"]]] = ... +# (m,m), (m,n) -> (m,n) +solve: Final[_GUFunc_Nin2_Nout1[L["solve"], L[4], None, L["(m,m),(m,n)->(m,n)"]]] = ... 
+# (m,m), (m) -> (m) +solve1: Final[_GUFunc_Nin2_Nout1[L["solve1"], L[4], None, L["(m,m),(m)->(m)"]]] = ... + +### +# 1 -> 3 + +# (m,n) -> (m,m), (p), (n,n) +svd_f: Final[np.ufunc] = ... +# (m,n) -> (m,p), (p), (p,n) +svd_s: Final[np.ufunc] = ... + +### +# 3 -> 4 + +# (m,n), (m,k), () -> (n,k), (k), (), (p) +lstsq: Final[np.ufunc] = ... diff --git a/numpy/linalg/lapack_lite.pyi b/numpy/linalg/lapack_lite.pyi new file mode 100644 index 000000000000..0f6bfa3a022b --- /dev/null +++ b/numpy/linalg/lapack_lite.pyi @@ -0,0 +1,141 @@ +from typing import Any, Final, TypedDict, type_check_only + +import numpy as np +from numpy._typing import NDArray + +from ._linalg import fortran_int + +### + +@type_check_only +class _GELSD(TypedDict): + m: int + n: int + nrhs: int + lda: int + ldb: int + rank: int + lwork: int + info: int + +@type_check_only +class _DGELSD(_GELSD): + dgelsd_: int + rcond: float + +@type_check_only +class _ZGELSD(_GELSD): + zgelsd_: int + +@type_check_only +class _GEQRF(TypedDict): + m: int + n: int + lda: int + lwork: int + info: int + +@type_check_only +class _DGEQRF(_GEQRF): + dgeqrf_: int + +@type_check_only +class _ZGEQRF(_GEQRF): + zgeqrf_: int + +@type_check_only +class _DORGQR(TypedDict): + dorgqr_: int + info: int + +@type_check_only +class _ZUNGQR(TypedDict): + zungqr_: int + info: int + +### + +_ilp64: Final[bool] = ... + +def dgelsd( + m: int, + n: int, + nrhs: int, + a: NDArray[np.float64], + lda: int, + b: NDArray[np.float64], + ldb: int, + s: NDArray[np.float64], + rcond: float, + rank: int, + work: NDArray[np.float64], + lwork: int, + iwork: NDArray[fortran_int], + info: int, +) -> _DGELSD: ... +def zgelsd( + m: int, + n: int, + nrhs: int, + a: NDArray[np.complex128], + lda: int, + b: NDArray[np.complex128], + ldb: int, + s: NDArray[np.float64], + rcond: float, + rank: int, + work: NDArray[np.complex128], + lwork: int, + rwork: NDArray[np.float64], + iwork: NDArray[fortran_int], + info: int, +) -> _ZGELSD: ... 
+ +# +def dgeqrf( + m: int, + n: int, + a: NDArray[np.float64], # in/out, shape: (lda, n) + lda: int, + tau: NDArray[np.float64], # out, shape: (min(m, n),) + work: NDArray[np.float64], # out, shape: (max(1, lwork),) + lwork: int, + info: int, # out +) -> _DGEQRF: ... +def zgeqrf( + m: int, + n: int, + a: NDArray[np.complex128], # in/out, shape: (lda, n) + lda: int, + tau: NDArray[np.complex128], # out, shape: (min(m, n),) + work: NDArray[np.complex128], # out, shape: (max(1, lwork),) + lwork: int, + info: int, # out +) -> _ZGEQRF: ... + +# +def dorgqr( + m: int, # >=0 + n: int, # m >= n >= 0 + k: int, # n >= k >= 0 + a: NDArray[np.float64], # in/out, shape: (lda, n) + lda: int, # >= max(1, m) + tau: NDArray[np.float64], # in, shape: (k,) + work: NDArray[np.float64], # out, shape: (max(1, lwork),) + lwork: int, + info: int, # out +) -> _DORGQR: ... +def zungqr( + m: int, + n: int, + k: int, + a: NDArray[np.complex128], + lda: int, + tau: NDArray[np.complex128], + work: NDArray[np.complex128], + lwork: int, + info: int, +) -> _ZUNGQR: ... + +# +def xerbla(srname: object, info: int) -> None: ... 
diff --git a/numpy/linalg/linalg.pyi b/numpy/linalg/linalg.pyi new file mode 100644 index 000000000000..dbe9becfb8d5 --- /dev/null +++ b/numpy/linalg/linalg.pyi @@ -0,0 +1,69 @@ +from ._linalg import ( + LinAlgError, + cholesky, + cond, + cross, + det, + diagonal, + eig, + eigh, + eigvals, + eigvalsh, + inv, + lstsq, + matmul, + matrix_norm, + matrix_power, + matrix_rank, + matrix_transpose, + multi_dot, + norm, + outer, + pinv, + qr, + slogdet, + solve, + svd, + svdvals, + tensordot, + tensorinv, + tensorsolve, + trace, + vecdot, + vector_norm, +) + +__all__ = [ + "LinAlgError", + "cholesky", + "cond", + "cross", + "det", + "diagonal", + "eig", + "eigh", + "eigvals", + "eigvalsh", + "inv", + "lstsq", + "matmul", + "matrix_norm", + "matrix_power", + "matrix_rank", + "matrix_transpose", + "multi_dot", + "norm", + "outer", + "pinv", + "qr", + "slogdet", + "solve", + "svd", + "svdvals", + "tensordot", + "tensorinv", + "tensorsolve", + "trace", + "vecdot", + "vector_norm", +] diff --git a/numpy/linalg/meson.build b/numpy/linalg/meson.build index 740c9f56c6fa..e2f8136208d6 100644 --- a/numpy/linalg/meson.build +++ b/numpy/linalg/meson.build @@ -45,7 +45,10 @@ py.install_sources( '__init__.pyi', '_linalg.py', '_linalg.pyi', + '_umath_linalg.pyi', + 'lapack_lite.pyi', 'linalg.py', + 'linalg.pyi', ], subdir: 'numpy/linalg' ) diff --git a/numpy/matlib.pyi b/numpy/matlib.pyi index 67b753a87c32..cce0bf5b0541 100644 --- a/numpy/matlib.pyi +++ b/numpy/matlib.pyi @@ -394,7 +394,7 @@ from numpy import ( roots, rot90, round, - # row_stack, + row_stack, s_, save, savetxt, diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 40bb578d0d46..366e34d8af99 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -102,11 +102,11 @@ assert_type(np.searchsorted(AR_f4[0], 0), np.intp) assert_type(np.searchsorted(AR_b[0], [0]), npt.NDArray[np.intp]) 
assert_type(np.searchsorted(AR_f4[0], [0]), npt.NDArray[np.intp]) -assert_type(np.resize(b, (5, 5)), np.ndarray[tuple[L[5], L[5]], np.dtype[np.bool]]) -assert_type(np.resize(f4, (5, 5)), np.ndarray[tuple[L[5], L[5]], np.dtype[np.float32]]) -assert_type(np.resize(f, (5, 5)), np.ndarray[tuple[L[5], L[5]], np.dtype[Any]]) -assert_type(np.resize(AR_b, (5, 5)), np.ndarray[tuple[L[5], L[5]], np.dtype[np.bool]]) -assert_type(np.resize(AR_f4, (5, 5)), np.ndarray[tuple[L[5], L[5]], np.dtype[np.float32]]) +assert_type(np.resize(b, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.resize(f4, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.resize(f, (5, 5)), np.ndarray[tuple[int, int], np.dtype[Any]]) +assert_type(np.resize(AR_b, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.resize(AR_f4, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.float32]]) assert_type(np.squeeze(b), np.bool) assert_type(np.squeeze(f4), np.float32) From 29929338af26f8d3ed444364fdbf520a9954f2c9 Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 14 Mar 2025 23:00:42 +0100 Subject: [PATCH 137/187] TYP: fix stubtest errors in ``numpy.dtype`` and ``numpy.dtypes.*`` Ported from numpy/numtype#229 --- numpy/__init__.pyi | 15 ++++++++++++++- numpy/dtypes.pyi | 45 +++++++++++++++++++++++---------------------- 2 files changed, 37 insertions(+), 23 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 527f71d897ee..f8c499410993 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1196,8 +1196,21 @@ __future_scalars__: Final[set[L["bytes", "str", "object"]]] = ... __array_api_version__: Final[L["2023.12"]] = "2023.12" test: Final[PytestTester] = ... +@type_check_only +class _DTypeMeta(type): + @property + def type(cls, /) -> type[generic] | None: ... + @property + def _abstract(cls, /) -> bool: ... + @property + def _is_numeric(cls, /) -> bool: ... + @property + def _parametric(cls, /) -> bool: ... 
+ @property + def _legacy(cls, /) -> bool: ... + @final -class dtype(Generic[_SCT_co]): +class dtype(Generic[_SCT_co], metaclass=_DTypeMeta): names: None | tuple[builtins.str, ...] def __hash__(self) -> int: ... diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index 5cb345035f2c..11e5611653fa 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -1,18 +1,13 @@ -from typing import ( - Any, - Final, - Generic, - Literal as L, - NoReturn, - TypeAlias, - final, - type_check_only, -) +# ruff: noqa: ANN401 +from types import MemberDescriptorType +from typing import Any, ClassVar, Generic, NoReturn, TypeAlias, final, type_check_only +from typing import Literal as L + from typing_extensions import LiteralString, Self, TypeVar import numpy as np -__all__ = [ +__all__ = [ # noqa: RUF022 'BoolDType', 'Int8DType', 'ByteDType', @@ -53,7 +48,7 @@ __all__ = [ _SCT_co = TypeVar("_SCT_co", bound=np.generic, covariant=True) @type_check_only -class _SimpleDType(Generic[_SCT_co], np.dtype[_SCT_co]): # type: ignore[misc] +class _SimpleDType(np.dtype[_SCT_co], Generic[_SCT_co]): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] names: None # pyright: ignore[reportIncompatibleVariableOverride] def __new__(cls, /) -> Self: ... def __getitem__(self, key: Any, /) -> NoReturn: ... @@ -73,7 +68,7 @@ class _SimpleDType(Generic[_SCT_co], np.dtype[_SCT_co]): # type: ignore[misc] def subdtype(self) -> None: ... @type_check_only -class _LiteralDType(Generic[_SCT_co], _SimpleDType[_SCT_co]): # type: ignore[misc] +class _LiteralDType(_SimpleDType[_SCT_co], Generic[_SCT_co]): # type: ignore[misc] @property def flags(self) -> L[0]: ... @property @@ -234,10 +229,11 @@ class UInt64DType( # type: ignore[misc] def str(self) -> L["u8"]: ... 
# Standard C-named version/alias: -ByteDType: Final = Int8DType -UByteDType: Final = UInt8DType -ShortDType: Final = Int16DType -UShortDType: Final = UInt16DType +# NOTE: Don't make these `Final`: it will break stubtest +ByteDType = Int8DType +UByteDType = UInt8DType +ShortDType = Int16DType +UShortDType = UInt16DType @final class IntDType( # type: ignore[misc] @@ -419,11 +415,11 @@ class ObjectDType( # type: ignore[misc] @final class BytesDType( # type: ignore[misc] - Generic[_ItemSize_co], _TypeCodes[L["S"], L["S"], L[18]], _NoOrder, _NBit[L[1],_ItemSize_co], _SimpleDType[np.bytes_], + Generic[_ItemSize_co], ): def __new__(cls, size: _ItemSize_co, /) -> BytesDType[_ItemSize_co]: ... @property @@ -435,11 +431,11 @@ class BytesDType( # type: ignore[misc] @final class StrDType( # type: ignore[misc] - Generic[_ItemSize_co], _TypeCodes[L["U"], L["U"], L[19]], _NativeOrder, _NBit[L[4],_ItemSize_co], _SimpleDType[np.str_], + Generic[_ItemSize_co], ): def __new__(cls, size: _ItemSize_co, /) -> StrDType[_ItemSize_co]: ... @property @@ -451,11 +447,11 @@ class StrDType( # type: ignore[misc] @final class VoidDType( # type: ignore[misc] - Generic[_ItemSize_co], _TypeCodes[L["V"], L["V"], L[20]], _NoOrder, _NBit[L[1], _ItemSize_co], - np.dtype[np.void], + np.dtype[np.void], # pyright: ignore[reportGeneralTypeIssues] + Generic[_ItemSize_co], ): # NOTE: `VoidDType(...)` raises a `TypeError` at the moment def __new__(cls, length: _ItemSize_co, /) -> NoReturn: ... @@ -578,8 +574,13 @@ class StringDType( # type: ignore[misc] _NativeOrder, _NBit[L[8], L[16]], # TODO: Replace the (invalid) `str` with the scalar type, once implemented - np.dtype[str], # type: ignore[type-var] + np.dtype[str], # type: ignore[type-var] # pyright: ignore[reportGeneralTypeIssues,reportInvalidTypeArguments] ): + @property + def coerce(self) -> L[True]: ... + na_object: ClassVar[MemberDescriptorType] # does not get instantiated + + # def __new__(cls, /) -> StringDType: ... 
def __getitem__(self, key: Any, /) -> NoReturn: ... @property From 79b8c26a0ae7b2b1eda1a8b23ded33f16b07d19c Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 14 Mar 2025 23:08:23 +0100 Subject: [PATCH 138/187] TYP: fix stubtest errors in ``timedelta64`` and ``object_`` Ported from numpy/numtype#228 --- the following methods were missing according to stubtest: - `numpy.object_.__call__` - `numpy.timedelta64.__class_getitem__` --- numpy/__init__.pyi | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index f8c499410993..7806724d707b 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3990,7 +3990,7 @@ bool_ = bool # NOTE: Because mypy has some long-standing bugs related to `__new__`, `object_` can't # be made generic. @final -class object_(_RealMixin, generic): +class object_(_RealMixin, generic[Any]): @overload def __new__(cls, nothing_to_see_here: None = None, /) -> None: ... # type: ignore[misc] @overload @@ -4004,6 +4004,8 @@ class object_(_RealMixin, generic): @overload # catch-all def __new__(cls, value: Any = ..., /) -> object | NDArray[Self]: ... # type: ignore[misc] def __init__(self, value: object = ..., /) -> None: ... + def __hash__(self, /) -> int: ... + def __call__(self, /, *args: object, **kwargs: object) -> Any: ... if sys.version_info >= (3, 12): def __release_buffer__(self, buffer: memoryview, /) -> None: ... @@ -4466,6 +4468,9 @@ class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co] @overload def __init__(self, value: _ConvertibleToTD64, format: _TimeUnitSpec = ..., /) -> None: ... + # inherited at runtime from `signedinteger` + def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... + # NOTE: Only a limited number of units support conversion # to builtin scalar types: `Y`, `M`, `ns`, `ps`, `fs`, `as` def __int__(self: timedelta64[int], /) -> int: ... 
From 4a20c5154b7cca9a627df298f8ab6aefbdea0acd Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 14 Mar 2025 23:41:42 +0100 Subject: [PATCH 139/187] TYP: fix stubtest errors in ``numpy.lib._function_base_impl`` Ported from numpy/numtype#233 --- This fixes the signatures of the following public `numpy` members: - `average` - `ma.average` - `median` - `corrcoef` --- numpy/lib/_function_base_impl.pyi | 185 +++++++++++++++--------------- numpy/ma/extras.pyi | 3 +- 2 files changed, 95 insertions(+), 93 deletions(-) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 214ad1f04f4b..e98dcbb7e741 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -1,57 +1,60 @@ -from collections.abc import Sequence, Callable, Iterable +# ruff: noqa: ANN401 +from collections.abc import Callable, Iterable, Sequence from typing import ( - Concatenate, - Literal as L, Any, + Concatenate, ParamSpec, - TypeAlias, - TypeVar, - overload, Protocol, SupportsIndex, SupportsInt, - TypeGuard, - type_check_only + TypeAlias, + TypeVar, + overload, + type_check_only, ) -from typing_extensions import deprecated +from typing import Literal as L + +from _typeshed import Incomplete +from typing_extensions import TypeIs, deprecated import numpy as np from numpy import ( - vectorize, + _OrderKACF, + bool_, + complex128, + complexfloating, + datetime64, + float64, + floating, generic, integer, - floating, - complexfloating, intp, - float64, - complex128, - timedelta64, - datetime64, object_, - bool_, - _OrderKACF, + timedelta64, + vectorize, ) from numpy._core.multiarray import bincount +from numpy._globals import _NoValueType from numpy._typing import ( - NDArray, ArrayLike, DTypeLike, + NDArray, _ArrayLike, - _DTypeLike, - _ShapeLike, _ArrayLikeBool_co, - _ArrayLikeInt_co, - _ArrayLikeFloat_co, _ArrayLikeComplex_co, - _ArrayLikeNumber_co, - _ArrayLikeTD64_co, _ArrayLikeDT64_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + 
_ArrayLikeNumber_co, _ArrayLikeObject_co, - _FloatLike_co, + _ArrayLikeTD64_co, _ComplexLike_co, + _DTypeLike, + _FloatLike_co, + _NestedSequence, _NumberLike_co, _ScalarLike_co, - _NestedSequence + _ShapeLike, ) __all__ = [ @@ -106,12 +109,14 @@ _2Tuple: TypeAlias = tuple[_T, _T] @type_check_only class _TrimZerosSequence(Protocol[_T_co]): - def __len__(self) -> int: ... + def __len__(self, /) -> int: ... @overload def __getitem__(self, key: int, /) -> object: ... @overload def __getitem__(self, key: slice, /) -> _T_co: ... +### + @overload def rot90( m: _ArrayLike[_SCT], @@ -134,72 +139,62 @@ def flip(m: _ArrayLike[_SCT], axis: None | _ShapeLike = ...) -> NDArray[_SCT]: . @overload def flip(m: ArrayLike, axis: None | _ShapeLike = ...) -> NDArray[Any]: ... -def iterable(y: object) -> TypeGuard[Iterable[Any]]: ... +def iterable(y: object) -> TypeIs[Iterable[Any]]: ... @overload def average( a: _ArrayLikeFloat_co, - axis: None = ..., - weights: None | _ArrayLikeFloat_co= ..., - returned: L[False] = ..., - keepdims: L[False] = ..., -) -> floating[Any]: ... -@overload -def average( - a: _ArrayLikeComplex_co, - axis: None = ..., - weights: None | _ArrayLikeComplex_co = ..., - returned: L[False] = ..., - keepdims: L[False] = ..., -) -> complexfloating[Any, Any]: ... -@overload -def average( - a: _ArrayLikeObject_co, - axis: None = ..., - weights: None | Any = ..., - returned: L[False] = ..., - keepdims: L[False] = ..., -) -> Any: ... + axis: None = None, + weights: _ArrayLikeFloat_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> floating: ... @overload def average( a: _ArrayLikeFloat_co, - axis: None = ..., - weights: None | _ArrayLikeFloat_co= ..., - returned: L[True] = ..., - keepdims: L[False] = ..., -) -> _2Tuple[floating[Any]]: ... + axis: None = None, + weights: _ArrayLikeFloat_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _2Tuple[floating]: ... 
@overload def average( a: _ArrayLikeComplex_co, - axis: None = ..., - weights: None | _ArrayLikeComplex_co = ..., - returned: L[True] = ..., - keepdims: L[False] = ..., -) -> _2Tuple[complexfloating[Any, Any]]: ... + axis: None = None, + weights: _ArrayLikeComplex_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> complexfloating: ... @overload def average( - a: _ArrayLikeObject_co, - axis: None = ..., - weights: None | Any = ..., - returned: L[True] = ..., - keepdims: L[False] = ..., -) -> _2Tuple[Any]: ... + a: _ArrayLikeComplex_co, + axis: None = None, + weights: _ArrayLikeComplex_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _2Tuple[complexfloating]: ... @overload def average( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - weights: None | Any = ..., - returned: L[False] = ..., - keepdims: bool = ..., -) -> Any: ... + axis: _ShapeLike | None = None, + weights: object | None = None, + *, + returned: L[True], + keepdims: bool | bool_ | _NoValueType = ..., +) -> _2Tuple[Incomplete]: ... @overload def average( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None | _ShapeLike = ..., - weights: None | Any = ..., - returned: L[True] = ..., - keepdims: bool = ..., -) -> _2Tuple[Any]: ... + axis: _ShapeLike | None = None, + weights: object | None = None, + returned: bool | bool_ = False, + *, + keepdims: bool | bool_ | _NoValueType = ..., +) -> Incomplete: ... @overload def asarray_chkfinite( @@ -478,38 +473,46 @@ def cov( dtype: DTypeLike, ) -> NDArray[Any]: ... 
-# NOTE `bias` and `ddof` have been deprecated +# NOTE `bias` and `ddof` are deprecated and ignored @overload def corrcoef( m: _ArrayLikeFloat_co, - y: None | _ArrayLikeFloat_co = ..., - rowvar: bool = ..., + y: _ArrayLikeFloat_co | None = None, + rowvar: bool = True, + bias: _NoValueType = ..., + ddof: _NoValueType = ..., *, - dtype: None = ..., -) -> NDArray[floating[Any]]: ... + dtype: None = None, +) -> NDArray[floating]: ... @overload def corrcoef( m: _ArrayLikeComplex_co, - y: None | _ArrayLikeComplex_co = ..., - rowvar: bool = ..., + y: _ArrayLikeComplex_co | None = None, + rowvar: bool = True, + bias: _NoValueType = ..., + ddof: _NoValueType = ..., *, - dtype: None = ..., -) -> NDArray[complexfloating[Any, Any]]: ... + dtype: None = None, +) -> NDArray[complexfloating]: ... @overload def corrcoef( m: _ArrayLikeComplex_co, - y: None | _ArrayLikeComplex_co = ..., - rowvar: bool = ..., + y: _ArrayLikeComplex_co | None = None, + rowvar: bool = True, + bias: _NoValueType = ..., + ddof: _NoValueType = ..., *, dtype: _DTypeLike[_SCT], ) -> NDArray[_SCT]: ... @overload def corrcoef( m: _ArrayLikeComplex_co, - y: None | _ArrayLikeComplex_co = ..., - rowvar: bool = ..., + y: _ArrayLikeComplex_co | None = None, + rowvar: bool = True, + bias: _NoValueType = ..., + ddof: _NoValueType = ..., *, - dtype: DTypeLike, + dtype: DTypeLike | None = None, ) -> NDArray[Any]: ... def blackman(M: _FloatLike_co) -> NDArray[floating[Any]]: ... @@ -581,7 +584,6 @@ def median( a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, axis: None | _ShapeLike, out: _ArrayType, - /, overwrite_input: bool = ..., keepdims: bool = ..., ) -> _ArrayType: ... 
@@ -749,7 +751,6 @@ def percentile( q: _ArrayLikeFloat_co, axis: None | _ShapeLike, out: _ArrayType, - /, overwrite_input: bool = ..., method: _MethodKind = ..., keepdims: bool = ..., diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index df69cd5d3465..580309cc679d 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -1,6 +1,8 @@ from typing import Any +from numpy.lib._function_base_impl import average from numpy.lib._index_tricks_impl import AxisConcatenator + from .core import dot, mask_rowcols __all__ = [ @@ -88,7 +90,6 @@ diagflat: _fromnxfunction_single def apply_along_axis(func1d, axis, arr, *args, **kwargs): ... def apply_over_axes(func, a, axes): ... -def average(a, axis=..., weights=..., returned=..., keepdims=...): ... def median(a, axis=..., out=..., overwrite_input=..., keepdims=...): ... def compress_nd(x, axis=...): ... def compress_rowcols(x, axis=...): ... From b6de917f8bc88b9d6240d68764429b3c9d108f16 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 15 Mar 2025 01:10:10 +0100 Subject: [PATCH 140/187] TYP: fix stubtest errors in ``numpy.lib._index_tricks_impl`` Ported from numpy/numtype#235 --- - move `ndenumerate` and `ndindex` definitions to `lib._index_tricks_impl` - add deprecated `ndenumerate.ndincr` property - removed non-existent `ndenumerate.iter` property - remove incorrect "pass" and "reveal" type-tests for `ndenumerate.iter` - fix incorrect `ndenumerate` constructor fallback return type - fix `AxisConcatenator.makemat` signature --- numpy/__init__.pyi | 46 +-- numpy/lib/_index_tricks_impl.pyi | 261 ++++++++++-------- numpy/typing/tests/data/pass/index_tricks.py | 4 - .../typing/tests/data/reveal/index_tricks.pyi | 12 +- 4 files changed, 149 insertions(+), 174 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 7806724d707b..539c5fa53c24 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -515,6 +515,8 @@ from numpy.lib._histograms_impl import ( ) from numpy.lib._index_tricks_impl import ( + 
ndenumerate, + ndindex, ravel_multi_index, unravel_index, mgrid, @@ -4964,50 +4966,6 @@ class errstate: ) -> None: ... def __call__(self, func: _CallableT) -> _CallableT: ... -class ndenumerate(Generic[_SCT_co]): - @property - def iter(self) -> flatiter[NDArray[_SCT_co]]: ... - - @overload - def __new__( - cls, arr: _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]], - ) -> ndenumerate[_SCT]: ... - @overload - def __new__(cls, arr: str | _NestedSequence[str]) -> ndenumerate[str_]: ... - @overload - def __new__(cls, arr: bytes | _NestedSequence[bytes]) -> ndenumerate[bytes_]: ... - @overload - def __new__(cls, arr: builtins.bool | _NestedSequence[builtins.bool]) -> ndenumerate[np.bool]: ... - @overload - def __new__(cls, arr: int | _NestedSequence[int]) -> ndenumerate[int_]: ... - @overload - def __new__(cls, arr: float | _NestedSequence[float]) -> ndenumerate[float64]: ... - @overload - def __new__(cls, arr: complex | _NestedSequence[complex]) -> ndenumerate[complex128]: ... - @overload - def __new__(cls, arr: object) -> ndenumerate[object_]: ... - - # The first overload is a (semi-)workaround for a mypy bug (tested with v1.10 and v1.11) - @overload - def __next__( - self: ndenumerate[np.bool | datetime64 | timedelta64 | number[Any] | flexible], - /, - ) -> tuple[_Shape, _SCT_co]: ... - @overload - def __next__(self: ndenumerate[object_], /) -> tuple[_Shape, Any]: ... - @overload - def __next__(self, /) -> tuple[_Shape, _SCT_co]: ... - - def __iter__(self) -> Self: ... - -class ndindex: - @overload - def __init__(self, shape: tuple[SupportsIndex, ...], /) -> None: ... - @overload - def __init__(self, *shape: SupportsIndex) -> None: ... - def __iter__(self) -> Self: ... - def __next__(self) -> _Shape: ... - # TODO: The type of each `__next__` and `iters` return-type depends # on the length and dtype of `args`; we can't describe this behavior yet # as we lack variadics (PEP 646). 
diff --git a/numpy/lib/_index_tricks_impl.pyi b/numpy/lib/_index_tricks_impl.pyi index bd508a8b5905..4a1426fd4d6c 100644 --- a/numpy/lib/_index_tricks_impl.pyi +++ b/numpy/lib/_index_tricks_impl.pyi @@ -1,45 +1,23 @@ from collections.abc import Sequence -from typing import ( - Any, - TypeVar, - Generic, - overload, - Literal, - SupportsIndex, -) +from typing import Any, ClassVar, Final, Generic, SupportsIndex, final, overload +from typing import Literal as L + +from _typeshed import Incomplete +from typing_extensions import Self, TypeVar, deprecated import numpy as np -from numpy import ( - # Circumvent a naming conflict with `AxisConcatenator.matrix` - matrix as _Matrix, - ndenumerate, - ndindex, - ndarray, - dtype, - str_, - bytes_, - int_, - float64, - complex128, -) +from numpy._core.multiarray import ravel_multi_index, unravel_index from numpy._typing import ( - # Arrays ArrayLike, - _NestedSequence, - _FiniteNestedSequence, NDArray, - - # DTypes - DTypeLike, - _SupportsDType, - - # Shapes + _FiniteNestedSequence, + _NestedSequence, _Shape, + _SupportsArray, + _SupportsDType, ) -from numpy._core.multiarray import unravel_index, ravel_multi_index - -__all__ = [ +__all__ = [ # noqa: RUF022 "ravel_multi_index", "unravel_index", "mgrid", @@ -56,114 +34,163 @@ __all__ = [ "diag_indices_from", ] +### + _T = TypeVar("_T") -_DType = TypeVar("_DType", bound=dtype[Any]) -_BoolType = TypeVar("_BoolType", Literal[True], Literal[False]) -_TupType = TypeVar("_TupType", bound=tuple[Any, ...]) -_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) +_TupleT = TypeVar("_TupleT", bound=tuple[Any, ...]) +_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype[Any]) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) +_BoolT_co = TypeVar("_BoolT_co", bound=bool, default=bool, covariant=True) -@overload -def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DType]]) -> 
tuple[ndarray[_Shape, _DType], ...]: ... -@overload -def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[str_], ...]: ... -@overload -def ix_(*args: bytes | _NestedSequence[bytes]) -> tuple[NDArray[bytes_], ...]: ... -@overload -def ix_(*args: bool | _NestedSequence[bool]) -> tuple[NDArray[np.bool], ...]: ... -@overload -def ix_(*args: int | _NestedSequence[int]) -> tuple[NDArray[int_], ...]: ... -@overload -def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[float64], ...]: ... -@overload -def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[complex128], ...]: ... +_AxisT_co = TypeVar("_AxisT_co", bound=int, default=L[0], covariant=True) +_MatrixT_co = TypeVar("_MatrixT_co", bound=bool, default=L[False], covariant=True) +_NDMinT_co = TypeVar("_NDMinT_co", bound=int, default=L[1], covariant=True) +_Trans1DT_co = TypeVar("_Trans1DT_co", bound=int, default=L[-1], covariant=True) + +### -class nd_grid(Generic[_BoolType]): - sparse: _BoolType - def __init__(self, sparse: _BoolType = ...) -> None: ... +class ndenumerate(Generic[_ScalarT_co]): + @overload + def __new__(cls, arr: _FiniteNestedSequence[_SupportsArray[np.dtype[_ScalarT]]]) -> ndenumerate[_ScalarT]: ... + @overload + def __new__(cls, arr: str | _NestedSequence[str]) -> ndenumerate[np.str_]: ... @overload - def __getitem__( - self: nd_grid[Literal[False]], - key: slice | Sequence[slice], - ) -> NDArray[Any]: ... + def __new__(cls, arr: bytes | _NestedSequence[bytes]) -> ndenumerate[np.bytes_]: ... @overload - def __getitem__( - self: nd_grid[Literal[True]], - key: slice | Sequence[slice], - ) -> tuple[NDArray[Any], ...]: ... + def __new__(cls, arr: bool | _NestedSequence[bool]) -> ndenumerate[np.bool]: ... + @overload + def __new__(cls, arr: int | _NestedSequence[int]) -> ndenumerate[np.intp]: ... + @overload + def __new__(cls, arr: float | _NestedSequence[float]) -> ndenumerate[np.float64]: ... 
+ @overload + def __new__(cls, arr: complex | _NestedSequence[complex]) -> ndenumerate[np.complex128]: ... + @overload + def __new__(cls, arr: object) -> ndenumerate[Any]: ... -class MGridClass(nd_grid[Literal[False]]): - def __init__(self) -> None: ... + # The first overload is a (semi-)workaround for a mypy bug (tested with v1.10 and v1.11) + @overload + def __next__( + self: ndenumerate[np.bool | np.number | np.flexible | np.datetime64 | np.timedelta64], + /, + ) -> tuple[tuple[int, ...], _ScalarT_co]: ... + @overload + def __next__(self: ndenumerate[np.object_], /) -> tuple[tuple[int, ...], Any]: ... + @overload + def __next__(self, /) -> tuple[tuple[int, ...], _ScalarT_co]: ... + + # + def __iter__(self) -> Self: ... + +class ndindex: + @overload + def __init__(self, shape: tuple[SupportsIndex, ...], /) -> None: ... + @overload + def __init__(self, /, *shape: SupportsIndex) -> None: ... + + # + def __iter__(self) -> Self: ... + def __next__(self) -> tuple[int, ...]: ... -mgrid: MGridClass + # + @deprecated("Deprecated since 1.20.0.") + def ndincr(self, /) -> None: ... -class OGridClass(nd_grid[Literal[True]]): +class nd_grid(Generic[_BoolT_co]): + sparse: _BoolT_co + def __init__(self, sparse: _BoolT_co = ...) -> None: ... + @overload + def __getitem__(self: nd_grid[L[False]], key: slice | Sequence[slice]) -> NDArray[Any]: ... + @overload + def __getitem__(self: nd_grid[L[True]], key: slice | Sequence[slice]) -> tuple[NDArray[Any], ...]: ... + +@final +class MGridClass(nd_grid[L[False]]): + def __init__(self) -> None: ... + +@final +class OGridClass(nd_grid[L[True]]): def __init__(self) -> None: ... 
-ogrid: OGridClass +class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co]): + __slots__ = "axis", "matrix", "ndmin", "trans1d" + + makemat: ClassVar[type[np.matrix[tuple[int, int], np.dtype[Any]]]] -class AxisConcatenator: - axis: int - matrix: bool - ndmin: int - trans1d: int + axis: _AxisT_co + matrix: _MatrixT_co + ndmin: _NDMinT_co + trans1d: _Trans1DT_co + + # def __init__( self, - axis: int = ..., - matrix: bool = ..., - ndmin: int = ..., - trans1d: int = ..., + /, + axis: _AxisT_co = ..., + matrix: _MatrixT_co = ..., + ndmin: _NDMinT_co = ..., + trans1d: _Trans1DT_co = ..., ) -> None: ... + + # TODO(jorenham): annotate this + def __getitem__(self, key: Incomplete, /) -> Incomplete: ... + def __len__(self, /) -> L[0]: ... + + # @staticmethod @overload - def concatenate( # type: ignore[misc] - *a: ArrayLike, axis: SupportsIndex = ..., out: None = ... - ) -> NDArray[Any]: ... + def concatenate(*a: ArrayLike, axis: SupportsIndex | None = 0, out: _ArrayT) -> _ArrayT: ... @staticmethod @overload - def concatenate( - *a: ArrayLike, axis: SupportsIndex = ..., out: _ArrayType = ... - ) -> _ArrayType: ... - @staticmethod - def makemat( - data: ArrayLike, dtype: DTypeLike = ..., copy: bool = ... - ) -> _Matrix[Any, Any]: ... - - # TODO: Sort out this `__getitem__` method - def __getitem__(self, key: Any) -> Any: ... - -class RClass(AxisConcatenator): - axis: Literal[0] - matrix: Literal[False] - ndmin: Literal[1] - trans1d: Literal[-1] - def __init__(self) -> None: ... + def concatenate(*a: ArrayLike, axis: SupportsIndex | None = 0, out: None = None) -> NDArray[Any]: ... -r_: RClass - -class CClass(AxisConcatenator): - axis: Literal[-1] - matrix: Literal[False] - ndmin: Literal[2] - trans1d: Literal[0] - def __init__(self) -> None: ... +@final +class RClass(AxisConcatenator[L[0], L[False], L[1], L[-1]]): + def __init__(self, /) -> None: ... 
-c_: CClass +@final +class CClass(AxisConcatenator[L[-1], L[False], L[2], L[0]]): + def __init__(self, /) -> None: ... -class IndexExpression(Generic[_BoolType]): - maketuple: _BoolType - def __init__(self, maketuple: _BoolType) -> None: ... +class IndexExpression(Generic[_BoolT_co]): + maketuple: _BoolT_co + def __init__(self, maketuple: _BoolT_co) -> None: ... @overload - def __getitem__(self, item: _TupType) -> _TupType: ... # type: ignore[misc] + def __getitem__(self, item: _TupleT) -> _TupleT: ... @overload - def __getitem__(self: IndexExpression[Literal[True]], item: _T) -> tuple[_T]: ... + def __getitem__(self: IndexExpression[L[True]], item: _T) -> tuple[_T]: ... @overload - def __getitem__(self: IndexExpression[Literal[False]], item: _T) -> _T: ... + def __getitem__(self: IndexExpression[L[False]], item: _T) -> _T: ... + +@overload +def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DTypeT]]) -> tuple[np.ndarray[_Shape, _DTypeT], ...]: ... +@overload +def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[np.str_], ...]: ... +@overload +def ix_(*args: bytes | _NestedSequence[bytes]) -> tuple[NDArray[np.bytes_], ...]: ... +@overload +def ix_(*args: bool | _NestedSequence[bool]) -> tuple[NDArray[np.bool], ...]: ... +@overload +def ix_(*args: int | _NestedSequence[int]) -> tuple[NDArray[np.intp], ...]: ... +@overload +def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[np.float64], ...]: ... +@overload +def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[np.complex128], ...]: ... + +# +def fill_diagonal(a: NDArray[Any], val: object, wrap: bool = ...) -> None: ... + +# +def diag_indices(n: int, ndim: int = ...) -> tuple[NDArray[np.intp], ...]: ... +def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[np.intp], ...]: ... -index_exp: IndexExpression[Literal[True]] -s_: IndexExpression[Literal[False]] +# +mgrid: Final[MGridClass] = ... +ogrid: Final[OGridClass] = ... 
-def fill_diagonal(a: NDArray[Any], val: Any, wrap: bool = ...) -> None: ... -def diag_indices(n: int, ndim: int = ...) -> tuple[NDArray[int_], ...]: ... -def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[int_], ...]: ... +r_: Final[RClass] = ... +c_: Final[CClass] = ... -# NOTE: see `numpy/__init__.pyi` for `ndenumerate` and `ndindex` +index_exp: Final[IndexExpression[L[True]]] = ... +s_: Final[IndexExpression[L[False]]] = ... diff --git a/numpy/typing/tests/data/pass/index_tricks.py b/numpy/typing/tests/data/pass/index_tricks.py index 4c4c1195990a..dfc4ff2f314a 100644 --- a/numpy/typing/tests/data/pass/index_tricks.py +++ b/numpy/typing/tests/data/pass/index_tricks.py @@ -13,10 +13,6 @@ np.ndenumerate(AR_LIKE_f) np.ndenumerate(AR_LIKE_U) -np.ndenumerate(AR_i8).iter -np.ndenumerate(AR_LIKE_f).iter -np.ndenumerate(AR_LIKE_U).iter - next(np.ndenumerate(AR_i8)) next(np.ndenumerate(AR_LIKE_f)) next(np.ndenumerate(AR_LIKE_U)) diff --git a/numpy/typing/tests/data/reveal/index_tricks.pyi b/numpy/typing/tests/data/reveal/index_tricks.pyi index 1db10928d2f5..06071feddd79 100644 --- a/numpy/typing/tests/data/reveal/index_tricks.pyi +++ b/numpy/typing/tests/data/reveal/index_tricks.pyi @@ -18,23 +18,17 @@ AR_O: npt.NDArray[np.object_] assert_type(np.ndenumerate(AR_i8), np.ndenumerate[np.int64]) assert_type(np.ndenumerate(AR_LIKE_f), np.ndenumerate[np.float64]) assert_type(np.ndenumerate(AR_LIKE_U), np.ndenumerate[np.str_]) -assert_type(np.ndenumerate(AR_LIKE_O), np.ndenumerate[np.object_]) - -assert_type(np.ndenumerate(AR_i8).iter, np.flatiter[npt.NDArray[np.int64]]) -assert_type(np.ndenumerate(AR_LIKE_f).iter, np.flatiter[npt.NDArray[np.float64]]) -assert_type(np.ndenumerate(AR_LIKE_U).iter, np.flatiter[npt.NDArray[np.str_]]) -assert_type(np.ndenumerate(AR_LIKE_O).iter, np.flatiter[npt.NDArray[np.object_]]) +assert_type(np.ndenumerate(AR_LIKE_O), np.ndenumerate[Any]) assert_type(next(np.ndenumerate(AR_i8)), tuple[tuple[int, ...], np.int64]) 
assert_type(next(np.ndenumerate(AR_LIKE_f)), tuple[tuple[int, ...], np.float64]) assert_type(next(np.ndenumerate(AR_LIKE_U)), tuple[tuple[int, ...], np.str_]) -# this fails due to an unknown mypy bug -# assert_type(next(np.ndenumerate(AR_LIKE_O)), tuple[tuple[int, ...], Any]) +assert_type(next(np.ndenumerate(AR_LIKE_O)), tuple[tuple[int, ...], Any]) assert_type(iter(np.ndenumerate(AR_i8)), np.ndenumerate[np.int64]) assert_type(iter(np.ndenumerate(AR_LIKE_f)), np.ndenumerate[np.float64]) assert_type(iter(np.ndenumerate(AR_LIKE_U)), np.ndenumerate[np.str_]) -assert_type(iter(np.ndenumerate(AR_LIKE_O)), np.ndenumerate[np.object_]) +assert_type(iter(np.ndenumerate(AR_LIKE_O)), np.ndenumerate[Any]) assert_type(np.ndindex(1, 2, 3), np.ndindex) assert_type(np.ndindex((1, 2, 3)), np.ndindex) From 710d3b1e9c4253579a199d9828537fabbab051b1 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 15 Mar 2025 03:23:05 +0100 Subject: [PATCH 141/187] TYP: work around a quantum-entangled mypy issue, somehow --- numpy/_core/fromnumeric.pyi | 72 ++++++++++--------- .../typing/tests/data/reveal/fromnumeric.pyi | 5 +- 2 files changed, 41 insertions(+), 36 deletions(-) diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 3de05f3db362..52f48efa9345 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -3,7 +3,6 @@ from collections.abc import Sequence from typing import ( Any, Literal, - NoReturn, Protocol, SupportsIndex, TypeAlias, @@ -11,6 +10,8 @@ from typing import ( overload, type_check_only, ) + +from _typeshed import Incomplete from typing_extensions import Never, deprecated import numpy as np @@ -551,9 +552,6 @@ def ravel( @overload def ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[tuple[int], np.dtype[Any]]: ... -@overload -def nonzero(a: np.generic | np.ndarray[tuple[()], Any]) -> NoReturn: ... -@overload def nonzero(a: _ArrayLike[Any]) -> tuple[NDArray[intp], ...]: ... 
# this prevents `Any` from being returned with Pyright @@ -813,7 +811,7 @@ def all( keepdims: _BoolLike_co | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> np.bool | NDArray[np.bool]: ... +) -> Incomplete: ... @overload def all( a: ArrayLike, @@ -850,7 +848,7 @@ def any( keepdims: _BoolLike_co | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> np.bool | NDArray[np.bool]: ... +) -> Incomplete: ... @overload def any( a: ArrayLike, @@ -1443,10 +1441,10 @@ def mean( keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> complexfloating[Any, Any]: ... +) -> complexfloating[Any]: ... @overload def mean( - a: _ArrayLikeTD64_co, + a: _ArrayLike[np.timedelta64], axis: None = ..., dtype: None = ..., out: None = ..., @@ -1457,23 +1455,33 @@ def mean( @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: None = ..., - out: None = ..., + axis: _ShapeLike | None, + dtype: DTypeLike, + out: _ArrayT, keepdims: bool | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> Any: ... +) -> _ArrayT: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: DTypeLike | None = ..., + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: None, dtype: _DTypeLike[_SCT], out: None = ..., - keepdims: bool | _NoValueType = ..., + keepdims: Literal[False] | _NoValueType = ..., *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _SCT | NDArray[_SCT]: ... +) -> _SCT: ... 
@overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, @@ -1487,43 +1495,43 @@ def mean( @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: None = ..., - *, + axis: _ShapeLike | None, dtype: _DTypeLike[_SCT], - out: None = ..., - keepdims: bool | _NoValueType = ..., + out: None, + keepdims: Literal[True, 1], + *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _SCT | NDArray[_SCT]: ... +) -> NDArray[_SCT]: ... @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., + axis: _ShapeLike | None, + dtype: _DTypeLike[_SCT], out: None = ..., - keepdims: bool | _NoValueType = ..., *, + keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> Any: ... +) -> _SCT | NDArray[_SCT]: ... @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, - axis: _ShapeLike | None, - dtype: DTypeLike, - out: _ArrayT, - keepdims: bool | _NoValueType = ..., + axis: _ShapeLike | None = ..., *, + dtype: _DTypeLike[_SCT], + out: None = ..., + keepdims: bool | _NoValueType = ..., where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> _SCT | NDArray[_SCT]: ... @overload def mean( a: _ArrayLikeComplex_co | _ArrayLikeObject_co, axis: _ShapeLike | None = ..., - dtype: DTypeLike = ..., - *, - out: _ArrayT, + dtype: DTypeLike | None = ..., + out: None = ..., keepdims: bool | _NoValueType = ..., + *, where: _ArrayLikeBool_co | _NoValueType = ..., -) -> _ArrayT: ... +) -> Incomplete: ... 
@overload def std( diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 366e34d8af99..7e778dc58410 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -1,6 +1,6 @@ """Tests for :mod:`_core.fromnumeric`.""" -from typing import Any, Literal as L, NoReturn +from typing import Any, Literal as L import numpy as np import numpy.typing as npt @@ -127,11 +127,8 @@ assert_type(np.ravel(f), np.ndarray[tuple[int], np.dtype[np.float64 | np.int_ | assert_type(np.ravel(AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) assert_type(np.ravel(AR_f4), np.ndarray[tuple[int], np.dtype[np.float32]]) -assert_type(np.nonzero(b), NoReturn) -assert_type(np.nonzero(f4), NoReturn) assert_type(np.nonzero(AR_b), tuple[npt.NDArray[np.intp], ...]) assert_type(np.nonzero(AR_f4), tuple[npt.NDArray[np.intp], ...]) -assert_type(np.nonzero(AR_0d), NoReturn) assert_type(np.nonzero(AR_1d), tuple[npt.NDArray[np.intp], ...]) assert_type(np.nonzero(AR_nd), tuple[npt.NDArray[np.intp], ...]) From c80a57ffae3279936b76c4d56432709987d2922e Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 15 Mar 2025 02:13:11 +0100 Subject: [PATCH 142/187] TYP: fix stubtest errors in ``numpy.lib._twodim_base_impl`` Ported from numpy/numtype#245 --- This fixes incorrect parameter names of `tril` and `triu`, and resolves a typing error in the signatures of `eye` and `tri`. 
--- numpy/lib/_twodim_base_impl.pyi | 90 ++++++++++++++++----------------- 1 file changed, 45 insertions(+), 45 deletions(-) diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index e748e91fb908..5d3ea54511b8 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -10,7 +10,6 @@ from typing import ( import numpy as np from numpy import ( generic, - number, timedelta64, datetime64, int_, @@ -56,14 +55,28 @@ __all__ = [ "triu_indices_from", ] +### + _T = TypeVar("_T") _SCT = TypeVar("_SCT", bound=generic) +_SCT_complex = TypeVar("_SCT_complex", bound=np.complexfloating) +_SCT_inexact = TypeVar("_SCT_inexact", bound=np.inexact) +_SCT_number_co = TypeVar("_SCT_number_co", bound=_Number_co) # The returned arrays dtype must be compatible with `np.equal` -_MaskFunc: TypeAlias = Callable[ - [NDArray[int_], _T], - NDArray[number[Any] | np.bool | timedelta64 | datetime64 | object_], -] +_MaskFunc: TypeAlias = Callable[[NDArray[int_], _T], NDArray[_Number_co | timedelta64 | datetime64 | object_]] + +_Int_co: TypeAlias = np.integer | np.bool +_Float_co: TypeAlias = np.floating | _Int_co +_Number_co: TypeAlias = np.number | np.bool + +_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_SCT]] | Sequence[_SCT] +_ArrayLike1DInt_co: TypeAlias = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] +_ArrayLike1DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] +_ArrayLike2DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[_ArrayLike1DFloat_co] +_ArrayLike1DNumber_co: TypeAlias = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] + +### @overload def fliplr(m: _ArrayLike[_SCT]) -> NDArray[_SCT]: ... @@ -87,13 +100,24 @@ def eye( like: None | _SupportsArrayFunc = ..., ) -> NDArray[float64]: ... 
@overload +def eye( + N: int, + M: None | int, + k: int, + dtype: _DTypeLike[_SCT], + order: _OrderCF = ..., + *, + device: None | L["cpu"] = ..., + like: None | _SupportsArrayFunc = ..., +) -> NDArray[_SCT]: ... +@overload def eye( N: int, M: None | int = ..., k: int = ..., - dtype: _DTypeLike[_SCT] = ..., - order: _OrderCF = ..., *, + dtype: _DTypeLike[_SCT], + order: _OrderCF = ..., device: None | L["cpu"] = ..., like: None | _SupportsArrayFunc = ..., ) -> NDArray[_SCT]: ... @@ -129,12 +153,21 @@ def tri( like: None | _SupportsArrayFunc = ... ) -> NDArray[float64]: ... @overload +def tri( + N: int, + M: None | int, + k: int, + dtype: _DTypeLike[_SCT], + *, + like: None | _SupportsArrayFunc = ... +) -> NDArray[_SCT]: ... +@overload def tri( N: int, M: None | int = ..., k: int = ..., - dtype: _DTypeLike[_SCT] = ..., *, + dtype: _DTypeLike[_SCT], like: None | _SupportsArrayFunc = ... ) -> NDArray[_SCT]: ... @overload @@ -148,14 +181,14 @@ def tri( ) -> NDArray[Any]: ... @overload -def tril(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ... +def tril(m: _ArrayLike[_SCT], k: int = 0) -> NDArray[_SCT]: ... @overload -def tril(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... +def tril(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... @overload -def triu(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ... +def triu(m: _ArrayLike[_SCT], k: int = 0) -> NDArray[_SCT]: ... @overload -def triu(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... +def triu(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... @overload def vander( # type: ignore[misc] @@ -182,38 +215,6 @@ def vander( increasing: bool = ..., ) -> NDArray[object_]: ... 
- -_Int_co: TypeAlias = np.integer[Any] | np.bool -_Float_co: TypeAlias = np.floating[Any] | _Int_co -_Number_co: TypeAlias = np.number[Any] | np.bool - -_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_SCT]] | Sequence[_SCT] -_ArrayLike2D: TypeAlias = ( - _SupportsArray[np.dtype[_SCT]] - | Sequence[_ArrayLike1D[_SCT]] -) - -_ArrayLike1DInt_co: TypeAlias = ( - _SupportsArray[np.dtype[_Int_co]] - | Sequence[int | _Int_co] -) -_ArrayLike1DFloat_co: TypeAlias = ( - _SupportsArray[np.dtype[_Float_co]] - | Sequence[float | int | _Float_co] -) -_ArrayLike2DFloat_co: TypeAlias = ( - _SupportsArray[np.dtype[_Float_co]] - | Sequence[_ArrayLike1DFloat_co] -) -_ArrayLike1DNumber_co: TypeAlias = ( - _SupportsArray[np.dtype[_Number_co]] - | Sequence[int | float | complex | _Number_co] -) - -_SCT_complex = TypeVar("_SCT_complex", bound=np.complexfloating[Any, Any]) -_SCT_inexact = TypeVar("_SCT_inexact", bound=np.inexact[Any]) -_SCT_number_co = TypeVar("_SCT_number_co", bound=_Number_co) - @overload def histogram2d( x: _ArrayLike1D[_SCT_complex], @@ -344,7 +345,6 @@ def histogram2d( NDArray[_SCT_number_co | complex128 | float64], NDArray[_SCT_number_co | complex128 | float64] , ]: ... 
- @overload def histogram2d( x: _ArrayLike1DNumber_co, From 8f561dbad1779ec9f78eaeeef30723d73e8f5b3c Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sun, 16 Mar 2025 00:08:49 +0100 Subject: [PATCH 143/187] TYP: fix stubtest errors in ``numpy._core`` (#28535) * TYP: fix stubtest errors in ``numpy._core._internal`` Ported from numpy/numtype#238 * TYP: fix stubtest errors in ``numpy._core.einsumfunc`` Ported from numpy/numtype#239 * TYP: fix stubtest errors in ``numpy._core.arrayprint``` Ported from numpy/numtype#240 * TYP: fix remaining stubtest errors in ``numpy._core`` Ported from numpy/numtype#246 * TYP: fix incorrect warning ignore in "pass" type-tests --- numpy/_core/_internal.pyi | 64 +++++++++++++++---- numpy/_core/arrayprint.pyi | 4 +- numpy/_core/einsumfunc.pyi | 3 +- numpy/_core/multiarray.pyi | 3 +- numpy/_core/records.pyi | 41 +++++++----- numpy/typing/tests/data/fail/ndarray_misc.pyi | 5 -- numpy/typing/tests/data/pass/ndarray_misc.py | 13 ++++ 7 files changed, 98 insertions(+), 35 deletions(-) diff --git a/numpy/_core/_internal.pyi b/numpy/_core/_internal.pyi index 690554f66f94..15726fe3064e 100644 --- a/numpy/_core/_internal.pyi +++ b/numpy/_core/_internal.pyi @@ -1,23 +1,41 @@ -from typing import Any, TypeVar, overload, Generic import ctypes as ct +import re +from collections.abc import Callable, Iterable +from typing import Any, Final, Generic, overload -from numpy.typing import NDArray +from typing_extensions import Self, TypeVar, deprecated + +import numpy as np +import numpy.typing as npt from numpy.ctypeslib import c_intp -_CastT = TypeVar("_CastT", bound=ct._CanCastTo) # Copied from `ctypes.cast` +_CastT = TypeVar("_CastT", bound=ct._CanCastTo) +_T_co = TypeVar("_T_co", covariant=True) _CT = TypeVar("_CT", bound=ct._CData) -_PT = TypeVar("_PT", bound=int) +_PT_co = TypeVar("_PT_co", bound=int | None, default=None, covariant=True) + +### + +IS_PYPY: Final[bool] = ... + +format_re: Final[re.Pattern[str]] = ... 
+sep_re: Final[re.Pattern[str]] = ... +space_re: Final[re.Pattern[str]] = ... + +### # TODO: Let the likes of `shape_as` and `strides_as` return `None` # for 0D arrays once we've got shape-support -class _ctypes(Generic[_PT]): +class _ctypes(Generic[_PT_co]): @overload - def __new__(cls, array: NDArray[Any], ptr: None = ...) -> _ctypes[None]: ... + def __init__(self: _ctypes[None], /, array: npt.NDArray[Any], ptr: None = None) -> None: ... @overload - def __new__(cls, array: NDArray[Any], ptr: _PT) -> _ctypes[_PT]: ... + def __init__(self, /, array: npt.NDArray[Any], ptr: _PT_co) -> None: ... + + # @property - def data(self) -> _PT: ... + def data(self) -> _PT_co: ... @property def shape(self) -> ct.Array[c_intp]: ... @property @@ -25,6 +43,30 @@ class _ctypes(Generic[_PT]): @property def _as_parameter_(self) -> ct.c_void_p: ... - def data_as(self, obj: type[_CastT]) -> _CastT: ... - def shape_as(self, obj: type[_CT]) -> ct.Array[_CT]: ... - def strides_as(self, obj: type[_CT]) -> ct.Array[_CT]: ... + # + def data_as(self, /, obj: type[_CastT]) -> _CastT: ... + def shape_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ... + def strides_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ... + + # + @deprecated('"get_data" is deprecated. Use "data" instead') + def get_data(self, /) -> _PT_co: ... + @deprecated('"get_shape" is deprecated. Use "shape" instead') + def get_shape(self, /) -> ct.Array[c_intp]: ... + @deprecated('"get_strides" is deprecated. Use "strides" instead') + def get_strides(self, /) -> ct.Array[c_intp]: ... + @deprecated('"get_as_parameter" is deprecated. Use "_as_parameter_" instead') + def get_as_parameter(self, /) -> ct.c_void_p: ... + +class dummy_ctype(Generic[_T_co]): + _cls: type[_T_co] + + def __init__(self, /, cls: type[_T_co]) -> None: ... + def __eq__(self, other: Self, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __ne__(self, other: Self, /) -> bool: ... 
# type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __mul__(self, other: object, /) -> Self: ... + def __call__(self, /, *other: object) -> _T_co: ... + +def array_ufunc_errmsg_formatter(dummy: object, ufunc: np.ufunc, method: str, *inputs: object, **kwargs: object) -> str: ... +def array_function_errmsg_formatter(public_api: Callable[..., object], types: Iterable[str]) -> str: ... +def npy_ctypes_check(cls: type) -> bool: ... diff --git a/numpy/_core/arrayprint.pyi b/numpy/_core/arrayprint.pyi index 661d58a22fe3..1f8be64d5e7b 100644 --- a/numpy/_core/arrayprint.pyi +++ b/numpy/_core/arrayprint.pyi @@ -8,6 +8,7 @@ from typing import Any, Final, Literal, SupportsIndex, TypeAlias, TypedDict, ove from typing_extensions import deprecated import numpy as np +from numpy._globals import _NoValueType from numpy._typing import NDArray, _CharLike_co, _FloatLike_co __all__ = [ @@ -93,13 +94,14 @@ def array2string( suppress_small: bool | None = None, separator: str = " ", prefix: str = "", - *, + style: _NoValueType = ..., formatter: _FormatDict | None = None, threshold: int | None = None, edgeitems: int | None = None, sign: _Sign | None = None, floatmode: _FloatMode | None = None, suffix: str = "", + *, legacy: _Legacy | None = None, ) -> str: ... @overload # style= (positional), legacy="1.13" diff --git a/numpy/_core/einsumfunc.pyi b/numpy/_core/einsumfunc.pyi index d7de9c02e16e..00629a478c25 100644 --- a/numpy/_core/einsumfunc.pyi +++ b/numpy/_core/einsumfunc.pyi @@ -180,5 +180,6 @@ def einsum_path( subscripts: str | _ArrayLikeInt_co, /, *operands: _ArrayLikeComplex_co | _DTypeLikeObject, - optimize: _OptimizeKind = ..., + optimize: _OptimizeKind = "greedy", + einsum_call: Literal[False] = False, ) -> tuple[list[Any], str]: ... 
diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index b656472dfec7..ea304c0789ab 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -355,7 +355,8 @@ class _ConstructorEmpty(Protocol): **kwargs: Unpack[_KwargsEmpty], ) -> NDArray[Any]: ... -error: Final = Exception +# using `Final` or `TypeAlias` will break stubtest +error = Exception # from ._multiarray_umath ITEM_HASOBJECT: Final[L[1]] diff --git a/numpy/_core/records.pyi b/numpy/_core/records.pyi index 308f96b7407b..b4ca5ff0e3bf 100644 --- a/numpy/_core/records.pyi +++ b/numpy/_core/records.pyi @@ -1,12 +1,13 @@ # ruff: noqa: ANN401 # pyright: reportSelfClsParameterName=false from collections.abc import Iterable, Sequence -from types import EllipsisType -from typing import Any, Literal, Protocol, SupportsIndex, TypeAlias, TypeVar, overload, type_check_only +from typing import Any, ClassVar, Literal, Protocol, SupportsIndex, TypeAlias, overload, type_check_only from _typeshed import StrOrBytesPath +from typing_extensions import TypeVar -from numpy import _ByteOrder, _OrderKACF, _SupportsBuffer, dtype, generic, ndarray, void +import numpy as np +from numpy import _ByteOrder, _OrderKACF, _SupportsBuffer from numpy._typing import ArrayLike, DTypeLike, NDArray, _ArrayLikeVoid_co, _NestedSequence, _ShapeLike __all__ = [ @@ -22,11 +23,11 @@ __all__ = [ ] _T = TypeVar("_T") -_SCT = TypeVar("_SCT", bound=generic) -_DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True) +_SCT = TypeVar("_SCT", bound=np.generic) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype[Any], covariant=True) _ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], covariant=True) -_RecArray: TypeAlias = recarray[Any, dtype[_SCT]] +_RecArray: TypeAlias = recarray[Any, np.dtype[_SCT]] @type_check_only class _SupportsReadInto(Protocol): @@ -37,7 +38,7 @@ class _SupportsReadInto(Protocol): ### # exported in `numpy.rec` -class record(void): +class record(np.void): def __getattribute__(self, attr: 
str) -> Any: ... def __setattr__(self, attr: str, val: ArrayLike) -> None: ... def pprint(self) -> str: ... @@ -47,10 +48,9 @@ class record(void): def __getitem__(self, key: list[str]) -> record: ... # exported in `numpy.rec` -class recarray(ndarray[_ShapeT_co, _DType_co]): - # NOTE: While not strictly mandatory, we're demanding here that arguments - # for the `format_parser`- and `dtype`-based dtype constructors are - # mutually exclusive +class recarray(np.ndarray[_ShapeT_co, _DTypeT_co]): + __name__: ClassVar[Literal["record"]] = "record" + __module__: Literal["numpy"] = "numpy" @overload def __new__( subtype, @@ -66,7 +66,7 @@ class recarray(ndarray[_ShapeT_co, _DType_co]): byteorder: _ByteOrder | None = None, aligned: bool = False, order: _OrderKACF = "C", - ) -> recarray[Any, dtype[record]]: ... + ) -> _RecArray[record]: ... @overload def __new__( subtype, @@ -81,18 +81,20 @@ class recarray(ndarray[_ShapeT_co, _DType_co]): byteorder: None = None, aligned: Literal[False] = False, order: _OrderKACF = "C", - ) -> recarray[Any, dtype[Any]]: ... + ) -> _RecArray[Any]: ... def __array_finalize__(self, /, obj: object) -> None: ... def __getattribute__(self, attr: str, /) -> Any: ... def __setattr__(self, attr: str, val: ArrayLike, /) -> None: ... - @overload - def field(self, /, attr: int | str, val: None = None) -> Any: ... + + # @overload def field(self, /, attr: int | str, val: ArrayLike) -> None: ... + @overload + def field(self, /, attr: int | str, val: None = None) -> Any: ... # exported in `numpy.rec` class format_parser: - dtype: dtype[void] + dtype: np.dtype[np.void] def __init__( self, /, @@ -213,6 +215,7 @@ def array( dtype: None = None, shape: _ShapeLike | None = None, offset: int = 0, + strides: tuple[int, ...] | None = None, formats: None = None, names: None = None, titles: None = None, @@ -226,6 +229,7 @@ def array( dtype: DTypeLike, shape: _ShapeLike | None = None, offset: int = 0, + strides: tuple[int, ...] 
| None = None, formats: None = None, names: None = None, titles: None = None, @@ -239,6 +243,7 @@ def array( dtype: None = None, shape: _ShapeLike | None = None, offset: int = 0, + strides: tuple[int, ...] | None = None, *, formats: DTypeLike, names: str | Sequence[str] | None = None, @@ -253,6 +258,7 @@ def array( dtype: DTypeLike, shape: _ShapeLike, offset: int = 0, + strides: tuple[int, ...] | None = None, formats: None = None, names: None = None, titles: None = None, @@ -267,6 +273,7 @@ def array( *, shape: _ShapeLike, offset: int = 0, + strides: tuple[int, ...] | None = None, formats: DTypeLike, names: str | Sequence[str] | None = None, titles: str | Sequence[str] | None = None, @@ -280,6 +287,7 @@ def array( dtype: DTypeLike, shape: _ShapeLike | None = None, offset: int = 0, + strides: tuple[int, ...] | None = None, formats: None = None, names: None = None, titles: None = None, @@ -293,6 +301,7 @@ def array( dtype: None = None, shape: _ShapeLike | None = None, offset: int = 0, + strides: tuple[int, ...] 
| None = None, *, formats: DTypeLike, names: str | Sequence[str] | None = None, diff --git a/numpy/typing/tests/data/fail/ndarray_misc.pyi b/numpy/typing/tests/data/fail/ndarray_misc.pyi index 674b378829a0..38729557b43e 100644 --- a/numpy/typing/tests/data/fail/ndarray_misc.pyi +++ b/numpy/typing/tests/data/fail/ndarray_misc.pyi @@ -16,11 +16,6 @@ AR_b: npt.NDArray[np.bool] ctypes_obj = AR_f8.ctypes -reveal_type(ctypes_obj.get_data()) # E: has no attribute -reveal_type(ctypes_obj.get_shape()) # E: has no attribute -reveal_type(ctypes_obj.get_strides()) # E: has no attribute -reveal_type(ctypes_obj.get_as_parameter()) # E: has no attribute - f8.argpartition(0) # E: has no attribute f8.diagonal() # E: has no attribute f8.dot(1) # E: has no attribute diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py index fef9d519b78b..758626e18dd6 100644 --- a/numpy/typing/tests/data/pass/ndarray_misc.py +++ b/numpy/typing/tests/data/pass/ndarray_misc.py @@ -24,6 +24,8 @@ class SubClass(npt.NDArray[np.float64]): ... C: np.ndarray[Any, np.dtype[np.int32]] = np.array([0, 1, 2], dtype=np.int32) D = np.ones(3).view(SubClass) +ctypes_obj = A.ctypes + i4.all() A.all() A.all(axis=0) @@ -181,3 +183,14 @@ class SubClass(npt.NDArray[np.float64]): ... 
A_void: npt.NDArray[np.void] = np.empty(3, [("yop", float), ("yap", float)]) A_void["yop"] = A_float[:, 0] A_void["yap"] = A_float[:, 1] + +# deprecated + +with np.testing.assert_warns(DeprecationWarning): + ctypes_obj.get_data() # pyright: ignore[reportDeprecated] +with np.testing.assert_warns(DeprecationWarning): + ctypes_obj.get_shape() # pyright: ignore[reportDeprecated] +with np.testing.assert_warns(DeprecationWarning): + ctypes_obj.get_strides() # pyright: ignore[reportDeprecated] +with np.testing.assert_warns(DeprecationWarning): + ctypes_obj.get_as_parameter() # pyright: ignore[reportDeprecated] From 5bdbd6d1c0a5e934e1f38631ac2b84b4d3857cd5 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sun, 16 Mar 2025 00:10:32 +0100 Subject: [PATCH 144/187] TYP: fix stubtest errors in ``numpy._globals`` (#28536) Ported from numpy/numtype#249 --- numpy/_globals.pyi | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/numpy/_globals.pyi b/numpy/_globals.pyi index c6b17d68d6b2..b2231a9636b0 100644 --- a/numpy/_globals.pyi +++ b/numpy/_globals.pyi @@ -6,8 +6,10 @@ from typing import Final, final @final class _CopyMode(enum.Enum): ALWAYS = True - IF_NEEDED = False - NEVER = 2 + NEVER = False + IF_NEEDED = 2 + + def __bool__(self, /) -> bool: ... @final class _NoValueType: ... 
From ecf97ae7d381aeb917e4517721c8f981a77a153d Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sun, 16 Mar 2025 00:12:05 +0100 Subject: [PATCH 145/187] TYP: fix stubtest errors in ``numpy.mat[rix]lib`` (#28537) Ported from numpy/numtype#247 --- numpy/matlib.pyi | 8 ++++++++ numpy/matrixlib/defmatrix.pyi | 10 +++++----- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/numpy/matlib.pyi b/numpy/matlib.pyi index cce0bf5b0541..c6a10c6327ef 100644 --- a/numpy/matlib.pyi +++ b/numpy/matlib.pyi @@ -70,6 +70,7 @@ from numpy import ( bitwise_invert, bitwise_left_shift, bitwise_not, + bitwise_or, bitwise_right_shift, bitwise_xor, blackman, @@ -89,6 +90,7 @@ from numpy import ( c_, can_cast, cbrt, + cdouble, ceil, char, character, @@ -160,6 +162,7 @@ from numpy import ( expand_dims, expm1, extract, + f2py, fabs, fft, fill_diagonal, @@ -209,6 +212,7 @@ from numpy import ( greater_equal, half, hamming, + hanning, heaviside, histogram, histogram2d, @@ -220,6 +224,7 @@ from numpy import ( i0, iinfo, imag, + in1d, index_exp, indices, inexact, @@ -425,9 +430,11 @@ from numpy import ( sort, sort_complex, spacing, + split, sqrt, square, squeeze, + stack, std, str_, strings, @@ -459,6 +466,7 @@ from numpy import ( trunc, typecodes, typename, + typing, ubyte, ufunc, uint, diff --git a/numpy/matrixlib/defmatrix.pyi b/numpy/matrixlib/defmatrix.pyi index 03476555e59e..a6095cc1155a 100644 --- a/numpy/matrixlib/defmatrix.pyi +++ b/numpy/matrixlib/defmatrix.pyi @@ -1,10 +1,10 @@ -from collections.abc import Sequence, Mapping +from collections.abc import Mapping, Sequence from typing import Any from numpy import matrix from numpy._typing import ArrayLike, DTypeLike, NDArray -__all__ = ["matrix", "bmat", "asmatrix"] +__all__ = ["asmatrix", "bmat", "matrix"] def bmat( obj: str | Sequence[ArrayLike] | NDArray[Any], @@ -12,6 +12,6 @@ def bmat( gdict: None | Mapping[str, Any] = ..., ) -> matrix[tuple[int, int], Any]: ... -def asmatrix(data: ArrayLike, dtype: DTypeLike = ...) 
-> matrix[tuple[int, int], Any]: ... - -mat = asmatrix +def asmatrix( + data: ArrayLike, dtype: DTypeLike = ... +) -> matrix[tuple[int, int], Any]: ... From 2ff0b8870639ae2d81282159928030382c01acef Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sun, 16 Mar 2025 00:13:17 +0100 Subject: [PATCH 146/187] TYP: fix stubtest errors in ``numpy.random`` (#28538) Ported from numpy/numtype#251 --- numpy/random/bit_generator.pyi | 159 ++++++++++++++------------------- 1 file changed, 69 insertions(+), 90 deletions(-) diff --git a/numpy/random/bit_generator.pyi b/numpy/random/bit_generator.pyi index 8dfbcd9909dd..78fb769683d3 100644 --- a/numpy/random/bit_generator.pyi +++ b/numpy/random/bit_generator.pyi @@ -1,128 +1,107 @@ import abc -from threading import Lock from collections.abc import Callable, Mapping, Sequence -from typing import ( - Any, - NamedTuple, - TypeAlias, - TypedDict, - TypeVar, - overload, - Literal, - type_check_only, -) - -from numpy import dtype, uint32, uint64 -from numpy._typing import ( - NDArray, - _ArrayLikeInt_co, - _ShapeLike, - _SupportsDType, - _UInt32Codes, - _UInt64Codes, -) - -_T = TypeVar("_T") - -_DTypeLikeUint32: TypeAlias = ( - dtype[uint32] - | _SupportsDType[dtype[uint32]] - | type[uint32] - | _UInt32Codes -) -_DTypeLikeUint64: TypeAlias = ( - dtype[uint64] - | _SupportsDType[dtype[uint64]] - | type[uint64] - | _UInt64Codes -) +from threading import Lock +from typing import Any, ClassVar, Literal, NamedTuple, TypeAlias, TypedDict, overload, type_check_only + +from _typeshed import Incomplete +from typing_extensions import CapsuleType, Self + +import numpy as np +from numpy._typing import NDArray, _ArrayLikeInt_co, _DTypeLike, _ShapeLike, _UInt32Codes, _UInt64Codes + +__all__ = ["BitGenerator", "SeedSequence"] + +### + +_DTypeLikeUint_: TypeAlias = _DTypeLike[np.uint32 | np.uint64] | _UInt32Codes | _UInt64Codes @type_check_only class _SeedSeqState(TypedDict): - entropy: None | int | Sequence[int] + entropy: int | Sequence[int] | 
None spawn_key: tuple[int, ...] pool_size: int n_children_spawned: int @type_check_only class _Interface(NamedTuple): - state_address: Any - state: Any - next_uint64: Any - next_uint32: Any - next_double: Any - bit_generator: Any + state_address: Incomplete + state: Incomplete + next_uint64: Incomplete + next_uint32: Incomplete + next_double: Incomplete + bit_generator: Incomplete + +@type_check_only +class _CythonMixin: + def __setstate_cython__(self, pyx_state: object, /) -> None: ... + def __reduce_cython__(self) -> Any: ... # noqa: ANN401 + +@type_check_only +class _GenerateStateMixin(_CythonMixin): + def generate_state(self, /, n_words: int, dtype: _DTypeLikeUint_ = ...) -> NDArray[np.uint32 | np.uint64]: ... + +### class ISeedSequence(abc.ABC): @abc.abstractmethod - def generate_state( - self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ... - ) -> NDArray[uint32 | uint64]: ... + def generate_state(self, /, n_words: int, dtype: _DTypeLikeUint_ = ...) -> NDArray[np.uint32 | np.uint64]: ... -class ISpawnableSeedSequence(ISeedSequence): +class ISpawnableSeedSequence(ISeedSequence, abc.ABC): @abc.abstractmethod - def spawn(self: _T, n_children: int) -> list[_T]: ... + def spawn(self, /, n_children: int) -> list[Self]: ... + +class SeedlessSeedSequence(_GenerateStateMixin, ISpawnableSeedSequence): + def spawn(self, /, n_children: int) -> list[Self]: ... -class SeedlessSeedSequence(ISpawnableSeedSequence): - def generate_state( - self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ... - ) -> NDArray[uint32 | uint64]: ... - def spawn(self: _T, n_children: int) -> list[_T]: ... +class SeedSequence(_GenerateStateMixin, ISpawnableSeedSequence): + __pyx_vtable__: ClassVar[CapsuleType] = ... -class SeedSequence(ISpawnableSeedSequence): - entropy: None | int | Sequence[int] + entropy: int | Sequence[int] | None spawn_key: tuple[int, ...] 
pool_size: int n_children_spawned: int - pool: NDArray[uint32] + pool: NDArray[np.uint32] + def __init__( self, - entropy: None | int | Sequence[int] | _ArrayLikeInt_co = ..., + /, + entropy: _ArrayLikeInt_co | None = None, *, - spawn_key: Sequence[int] = ..., - pool_size: int = ..., + spawn_key: Sequence[int] = (), + pool_size: int = 4, n_children_spawned: int = ..., ) -> None: ... - def __repr__(self) -> str: ... + def spawn(self, /, n_children: int) -> list[Self]: ... @property - def state( - self, - ) -> _SeedSeqState: ... - def generate_state( - self, n_words: int, dtype: _DTypeLikeUint32 | _DTypeLikeUint64 = ... - ) -> NDArray[uint32 | uint64]: ... - def spawn(self, n_children: int) -> list[SeedSequence]: ... + def state(self) -> _SeedSeqState: ... -class BitGenerator(abc.ABC): +class BitGenerator(_CythonMixin, abc.ABC): lock: Lock - def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... - def __getstate__(self) -> tuple[dict[str, Any], ISeedSequence]: ... - def __setstate__( - self, state_seed_seq: dict[str, Any] | tuple[dict[str, Any], ISeedSequence] - ) -> None: ... - def __reduce__( - self, - ) -> tuple[ - Callable[[str], BitGenerator], - tuple[str], - tuple[dict[str, Any], ISeedSequence] - ]: ... - @abc.abstractmethod @property def state(self) -> Mapping[str, Any]: ... @state.setter - def state(self, value: Mapping[str, Any]) -> None: ... + def state(self, value: Mapping[str, Any], /) -> None: ... @property def seed_seq(self) -> ISeedSequence: ... - def spawn(self, n_children: int) -> list[BitGenerator]: ... - @overload - def random_raw(self, size: None = ..., output: Literal[True] = ...) -> int: ... # type: ignore[misc] - @overload - def random_raw(self, size: _ShapeLike = ..., output: Literal[True] = ...) -> NDArray[uint64]: ... # type: ignore[misc] - @overload - def random_raw(self, size: None | _ShapeLike = ..., output: Literal[False] = ...) -> None: ... 
# type: ignore[misc] - def _benchmark(self, cnt: int, method: str = ...) -> None: ... @property def ctypes(self) -> _Interface: ... @property def cffi(self) -> _Interface: ... + @property + def capsule(self) -> CapsuleType: ... + + # + def __init__(self, /, seed: _ArrayLikeInt_co | SeedSequence | None = None) -> None: ... + def __reduce__(self) -> tuple[Callable[[str], Self], tuple[str], tuple[Mapping[str, Any], ISeedSequence]]: ... + def spawn(self, /, n_children: int) -> list[Self]: ... + def _benchmark(self, /, cnt: int, method: str = "uint64") -> None: ... + + # + @overload + def random_raw(self, /, size: None = None, output: Literal[True] = True) -> int: ... + @overload + def random_raw(self, /, size: _ShapeLike, output: Literal[True] = True) -> NDArray[np.uint64]: ... + @overload + def random_raw(self, /, size: _ShapeLike | None, output: Literal[False]) -> None: ... + @overload + def random_raw(self, /, size: _ShapeLike | None = None, *, output: Literal[False]) -> None: ... From bed0064322e5f5c19fa108f7fe8a16cc9e141f2c Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sun, 16 Mar 2025 00:20:20 +0100 Subject: [PATCH 147/187] TYP: fix stubtest errors in ``numpy.testing`` (#28539) Ported from numpy/numtype#252 --- numpy/testing/__init__.pyi | 128 ++--- numpy/testing/_private/utils.pyi | 541 +++++++++++---------- numpy/typing/tests/data/fail/testing.pyi | 8 +- numpy/typing/tests/data/reveal/testing.pyi | 6 +- 4 files changed, 360 insertions(+), 323 deletions(-) diff --git a/numpy/testing/__init__.pyi b/numpy/testing/__init__.pyi index e47b8f9546c6..ba3c9a2b7a44 100644 --- a/numpy/testing/__init__.pyi +++ b/numpy/testing/__init__.pyi @@ -2,97 +2,101 @@ from unittest import TestCase from . 
import overrides from ._private.utils import ( - NOGIL_BUILD, - IS_WASM, + HAS_LAPACK64, + HAS_REFCOUNT, + IS_EDITABLE, + IS_INSTALLED, + IS_MUSL, IS_PYPY, IS_PYSTON, - IS_MUSL, - IS_EDITABLE, - HAS_REFCOUNT, - HAS_LAPACK64, - assert_equal, + IS_WASM, + NOGIL_BUILD, + NUMPY_ROOT, + IgnoreException, + KnownFailureException, + SkipTest, + assert_, + assert_allclose, assert_almost_equal, assert_approx_equal, + assert_array_almost_equal, + assert_array_almost_equal_nulp, + assert_array_compare, assert_array_equal, assert_array_less, - assert_string_equal, - assert_array_almost_equal, + assert_array_max_ulp, + assert_equal, + assert_no_gc_cycles, + assert_no_warnings, assert_raises, + assert_raises_regex, + assert_string_equal, + assert_warns, + break_cycles, build_err_msg, + check_support_sve, + clear_and_catch_warnings, decorate_methods, jiffies, + measure, memusage, print_assert_equal, + run_threaded, rundocs, runstring, - verbose, - measure, - assert_, - assert_array_almost_equal_nulp, - assert_raises_regex, - assert_array_max_ulp, - assert_warns, - assert_no_warnings, - assert_allclose, - IgnoreException, - clear_and_catch_warnings, - SkipTest, - KnownFailureException, - temppath, - tempdir, suppress_warnings, - assert_array_compare, - assert_no_gc_cycles, - break_cycles, - check_support_sve, - run_threaded, + tempdir, + temppath, + verbose, ) __all__ = [ - "assert_equal", + "HAS_LAPACK64", + "HAS_REFCOUNT", + "IS_EDITABLE", + "IS_INSTALLED", + "IS_MUSL", + "IS_PYPY", + "IS_PYSTON", + "IS_WASM", + "NOGIL_BUILD", + "NUMPY_ROOT", + "IgnoreException", + "KnownFailureException", + "SkipTest", + "TestCase", + "assert_", + "assert_allclose", "assert_almost_equal", "assert_approx_equal", + "assert_array_almost_equal", + "assert_array_almost_equal_nulp", + "assert_array_compare", "assert_array_equal", "assert_array_less", - "assert_string_equal", - "assert_array_almost_equal", + "assert_array_max_ulp", + "assert_equal", + "assert_no_gc_cycles", + "assert_no_warnings", 
"assert_raises", + "assert_raises_regex", + "assert_string_equal", + "assert_warns", + "break_cycles", "build_err_msg", + "check_support_sve", + "clear_and_catch_warnings", "decorate_methods", "jiffies", + "measure", "memusage", + "overrides", "print_assert_equal", + "run_threaded", "rundocs", "runstring", - "verbose", - "measure", - "assert_", - "assert_array_almost_equal_nulp", - "assert_raises_regex", - "assert_array_max_ulp", - "assert_warns", - "assert_no_warnings", - "assert_allclose", - "IgnoreException", - "clear_and_catch_warnings", - "SkipTest", - "KnownFailureException", - "temppath", - "tempdir", - "IS_PYPY", - "HAS_REFCOUNT", - "IS_WASM", "suppress_warnings", - "assert_array_compare", - "assert_no_gc_cycles", - "break_cycles", - "HAS_LAPACK64", - "IS_PYSTON", - "IS_MUSL", - "check_support_sve", - "NOGIL_BUILD", - "IS_EDITABLE", - "run_threaded", - "TestCase", - "overrides", + "tempdir", + "temppath", + "verbose", ] diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index b2f4045c7703..75ea45d3a721 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -1,42 +1,42 @@ -import sys import ast +import sys import types -import warnings import unittest -from _typeshed import GenericPath, StrOrBytesPath, StrPath +import warnings from collections.abc import Callable, Iterable, Sequence from contextlib import _GeneratorContextManager +from pathlib import Path from re import Pattern from typing import ( - Literal as L, Any, AnyStr, ClassVar, + Final, + Generic, NoReturn, + SupportsIndex, TypeAlias, overload, type_check_only, - TypeVar, - Final, - SupportsIndex, - ParamSpec ) +from typing import Literal as L +from unittest.case import SkipTest + +from _typeshed import ConvertibleToFloat, GenericPath, StrOrBytesPath, StrPath +from typing_extensions import ParamSpec, Self, TypeVar, TypeVarTuple, Unpack import numpy as np -from numpy import number, object_, _ConvertibleToFloat from numpy._typing import ( 
- NDArray, ArrayLike, DTypeLike, + NDArray, + _ArrayLikeDT64_co, _ArrayLikeNumber_co, _ArrayLikeObject_co, _ArrayLikeTD64_co, - _ArrayLikeDT64_co, ) -from unittest.case import SkipTest - -__all__ = [ +__all__ = [ # noqa: RUF022 "IS_EDITABLE", "IS_MUSL", "IS_PYPY", @@ -83,58 +83,33 @@ __all__ = [ "run_threaded", ] -_P = ParamSpec("_P") +### + _T = TypeVar("_T") -_ET = TypeVar("_ET", bound=BaseException) +_Ts = TypeVarTuple("_Ts") +_Tss = ParamSpec("_Tss") +_ET = TypeVar("_ET", bound=BaseException, default=BaseException) _FT = TypeVar("_FT", bound=Callable[..., Any]) +_W_co = TypeVar("_W_co", bound=_WarnLog | None, default=_WarnLog | None, covariant=True) +_T_or_bool = TypeVar("_T_or_bool", default=bool) + +_StrLike: TypeAlias = str | bytes +_RegexLike: TypeAlias = _StrLike | Pattern[Any] +_NumericArrayLike: TypeAlias = _ArrayLikeNumber_co | _ArrayLikeObject_co -# Must return a bool or an ndarray/generic type -# that is supported by `np.logical_and.reduce` +_ExceptionSpec: TypeAlias = type[_ET] | tuple[type[_ET], ...] +_WarningSpec: TypeAlias = type[Warning] +_WarnLog: TypeAlias = list[warnings.WarningMessage] +_ToModules: TypeAlias = Iterable[types.ModuleType] + +# Must return a bool or an ndarray/generic type that is supported by `np.logical_and.reduce` _ComparisonFunc: TypeAlias = Callable[ [NDArray[Any], NDArray[Any]], - ( - bool - | np.bool - | number[Any] - | NDArray[np.bool | number[Any] | object_] - ) + bool | np.bool | np.number | NDArray[np.bool | np.number | np.object_], ] -class KnownFailureException(Exception): ... -class IgnoreException(Exception): ... - -class clear_and_catch_warnings(warnings.catch_warnings[list[warnings.WarningMessage]]): - class_modules: ClassVar[tuple[types.ModuleType, ...]] - modules: set[types.ModuleType] - @overload - def __new__( - cls, - record: L[False] = ..., - modules: Iterable[types.ModuleType] = ..., - ) -> _clear_and_catch_warnings_without_records: ... 
- @overload - def __new__( - cls, - record: L[True], - modules: Iterable[types.ModuleType] = ..., - ) -> _clear_and_catch_warnings_with_records: ... - @overload - def __new__( - cls, - record: bool, - modules: Iterable[types.ModuleType] = ..., - ) -> clear_and_catch_warnings: ... - def __enter__(self) -> None | list[warnings.WarningMessage]: ... - def __exit__( - self, - __exc_type: None | type[BaseException] = ..., - __exc_val: None | BaseException = ..., - __exc_tb: None | types.TracebackType = ..., - ) -> None: ... - # Type-check only `clear_and_catch_warnings` subclasses for both values of the # `record` parameter. Copied from the stdlib `warnings` stubs. - @type_check_only class _clear_and_catch_warnings_with_records(clear_and_catch_warnings): def __enter__(self) -> list[warnings.WarningMessage]: ... @@ -143,321 +118,379 @@ class _clear_and_catch_warnings_with_records(clear_and_catch_warnings): class _clear_and_catch_warnings_without_records(clear_and_catch_warnings): def __enter__(self) -> None: ... +### + +verbose: int = 0 +NUMPY_ROOT: Final[Path] = ... +IS_INSTALLED: Final[bool] = ... +IS_EDITABLE: Final[bool] = ... +IS_MUSL: Final[bool] = ... +IS_PYPY: Final[bool] = ... +IS_PYSTON: Final[bool] = ... +IS_WASM: Final[bool] = ... +HAS_REFCOUNT: Final[bool] = ... +HAS_LAPACK64: Final[bool] = ... +NOGIL_BUILD: Final[bool] = ... + +class KnownFailureException(Exception): ... +class IgnoreException(Exception): ... + +# NOTE: `warnings.catch_warnings` is incorrectly defined as invariant in typeshed +class clear_and_catch_warnings(warnings.catch_warnings[_W_co], Generic[_W_co]): # type: ignore[type-var] # pyright: ignore[reportInvalidTypeArguments] + class_modules: ClassVar[tuple[types.ModuleType, ...]] = () + modules: Final[set[types.ModuleType]] + @overload # record: True + def __init__(self: clear_and_catch_warnings[_WarnLog], /, record: L[True], modules: _ToModules = ()) -> None: ... 
+ @overload # record: False (default) + def __init__(self: clear_and_catch_warnings[None], /, record: L[False] = False, modules: _ToModules = ()) -> None: ... + @overload # record; bool + def __init__(self, /, record: bool, modules: _ToModules = ()) -> None: ... + class suppress_warnings: - log: list[warnings.WarningMessage] - def __init__( - self, - forwarding_rule: L["always", "module", "once", "location"] = ..., - ) -> None: ... - def filter( - self, - category: type[Warning] = ..., - message: str = ..., - module: None | types.ModuleType = ..., - ) -> None: ... - def record( - self, - category: type[Warning] = ..., - message: str = ..., - module: None | types.ModuleType = ..., - ) -> list[warnings.WarningMessage]: ... - def __enter__(self: _T) -> _T: ... - def __exit__( - self, - __exc_type: None | type[BaseException] = ..., - __exc_val: None | BaseException = ..., - __exc_tb: None | types.TracebackType = ..., - ) -> None: ... - def __call__(self, func: _FT) -> _FT: ... - -verbose: int -IS_EDITABLE: Final[bool] -IS_MUSL: Final[bool] -IS_PYPY: Final[bool] -IS_PYSTON: Final[bool] -IS_WASM: Final[bool] -HAS_REFCOUNT: Final[bool] -HAS_LAPACK64: Final[bool] -NOGIL_BUILD: Final[bool] - -def assert_(val: object, msg: str | Callable[[], str] = ...) -> None: ... + log: Final[_WarnLog] + def __init__(self, /, forwarding_rule: L["always", "module", "once", "location"] = "always") -> None: ... + def __enter__(self) -> Self: ... + def __exit__(self, cls: type[BaseException] | None, exc: BaseException | None, tb: types.TracebackType | None, /) -> None: ... + def __call__(self, /, func: _FT) -> _FT: ... + + # + def filter(self, /, category: type[Warning] = ..., message: str = "", module: types.ModuleType | None = None) -> None: ... + def record(self, /, category: type[Warning] = ..., message: str = "", module: types.ModuleType | None = None) -> _WarnLog: ... 
# Contrary to runtime we can't do `os.name` checks while type checking, # only `sys.platform` checks if sys.platform == "win32" or sys.platform == "cygwin": def memusage(processName: str = ..., instance: int = ...) -> int: ... elif sys.platform == "linux": - def memusage(_proc_pid_stat: StrOrBytesPath = ...) -> None | int: ... + def memusage(_proc_pid_stat: StrOrBytesPath = ...) -> int | None: ... else: def memusage() -> NoReturn: ... if sys.platform == "linux": - def jiffies( - _proc_pid_stat: StrOrBytesPath = ..., - _load_time: list[float] = ..., - ) -> int: ... + def jiffies(_proc_pid_stat: StrOrBytesPath = ..., _load_time: list[float] = []) -> int: ... else: - def jiffies(_load_time: list[float] = ...) -> int: ... + def jiffies(_load_time: list[float] = []) -> int: ... +# def build_err_msg( arrays: Iterable[object], - err_msg: str, + err_msg: object, header: str = ..., verbose: bool = ..., names: Sequence[str] = ..., - precision: None | SupportsIndex = ..., + precision: SupportsIndex | None = ..., ) -> str: ... +# +def print_assert_equal(test_string: str, actual: object, desired: object) -> None: ... + +# +def assert_(val: object, msg: str | Callable[[], str] = "") -> None: ... + +# def assert_equal( actual: object, desired: object, - err_msg: object = ..., - verbose: bool = ..., + err_msg: object = "", + verbose: bool = True, *, - strict: bool = ... -) -> None: ... - -def print_assert_equal( - test_string: str, - actual: object, - desired: object, + strict: bool = False, ) -> None: ... def assert_almost_equal( - actual: _ArrayLikeNumber_co | _ArrayLikeObject_co, - desired: _ArrayLikeNumber_co | _ArrayLikeObject_co, - decimal: int = ..., - err_msg: object = ..., - verbose: bool = ..., + actual: _NumericArrayLike, + desired: _NumericArrayLike, + decimal: int = 7, + err_msg: object = "", + verbose: bool = True, ) -> None: ... 
-# Anything that can be coerced into `builtins.float` +# def assert_approx_equal( - actual: _ConvertibleToFloat, - desired: _ConvertibleToFloat, - significant: int = ..., - err_msg: object = ..., - verbose: bool = ..., + actual: ConvertibleToFloat, + desired: ConvertibleToFloat, + significant: int = 7, + err_msg: object = "", + verbose: bool = True, ) -> None: ... +# def assert_array_compare( comparison: _ComparisonFunc, x: ArrayLike, y: ArrayLike, - err_msg: object = ..., - verbose: bool = ..., - header: str = ..., - precision: SupportsIndex = ..., - equal_nan: bool = ..., - equal_inf: bool = ..., + err_msg: object = "", + verbose: bool = True, + header: str = "", + precision: SupportsIndex = 6, + equal_nan: bool = True, + equal_inf: bool = True, *, - strict: bool = ... + strict: bool = False, + names: tuple[str, str] = ("ACTUAL", "DESIRED"), ) -> None: ... +# def assert_array_equal( - x: ArrayLike, - y: ArrayLike, - /, - err_msg: object = ..., - verbose: bool = ..., + actual: object, + desired: object, + err_msg: object = "", + verbose: bool = True, *, - strict: bool = ... + strict: bool = False, ) -> None: ... +# def assert_array_almost_equal( - x: _ArrayLikeNumber_co | _ArrayLikeObject_co, - y: _ArrayLikeNumber_co | _ArrayLikeObject_co, - /, - decimal: float = ..., - err_msg: object = ..., - verbose: bool = ..., + actual: _NumericArrayLike, + desired: _NumericArrayLike, + decimal: float = 6, + err_msg: object = "", + verbose: bool = True, ) -> None: ... @overload def assert_array_less( - x: _ArrayLikeNumber_co | _ArrayLikeObject_co, - y: _ArrayLikeNumber_co | _ArrayLikeObject_co, - err_msg: object = ..., - verbose: bool = ..., + x: _ArrayLikeDT64_co, + y: _ArrayLikeDT64_co, + err_msg: object = "", + verbose: bool = True, *, - strict: bool = ... + strict: bool = False, ) -> None: ... 
@overload def assert_array_less( x: _ArrayLikeTD64_co, y: _ArrayLikeTD64_co, - err_msg: object = ..., - verbose: bool = ..., + err_msg: object = "", + verbose: bool = True, *, - strict: bool = ... + strict: bool = False, ) -> None: ... @overload def assert_array_less( - x: _ArrayLikeDT64_co, - y: _ArrayLikeDT64_co, - err_msg: object = ..., - verbose: bool = ..., + x: _NumericArrayLike, + y: _NumericArrayLike, + err_msg: object = "", + verbose: bool = True, *, - strict: bool = ... + strict: bool = False, ) -> None: ... -def runstring( - astr: str | bytes | types.CodeType, - dict: None | dict[str, Any], -) -> Any: ... - +# def assert_string_equal(actual: str, desired: str) -> None: ... -def rundocs( - filename: StrPath | None = ..., - raise_on_error: bool = ..., -) -> None: ... - -def check_support_sve(__cache: list[_T]) -> _T: ... - -def raises(*args: type[BaseException]) -> Callable[[_FT], _FT]: ... - -@overload -def assert_raises( # type: ignore - expected_exception: type[BaseException] | tuple[type[BaseException], ...], - callable: Callable[_P, Any], - /, - *args: _P.args, - **kwargs: _P.kwargs, -) -> None: ... +# @overload def assert_raises( - expected_exception: type[_ET] | tuple[type[_ET], ...], + exception_class: _ExceptionSpec[_ET], + /, *, - msg: None | str = ..., + msg: str | None = None, ) -> unittest.case._AssertRaisesContext[_ET]: ... - @overload -def assert_raises_regex( - expected_exception: type[BaseException] | tuple[type[BaseException], ...], - expected_regex: str | bytes | Pattern[Any], - callable: Callable[_P, Any], +def assert_raises( + exception_class: _ExceptionSpec, + callable: Callable[_Tss, Any], /, - *args: _P.args, - **kwargs: _P.kwargs, + *args: _Tss.args, + **kwargs: _Tss.kwargs, ) -> None: ... 
+ +# @overload def assert_raises_regex( - expected_exception: type[_ET] | tuple[type[_ET], ...], - expected_regex: str | bytes | Pattern[Any], + exception_class: _ExceptionSpec[_ET], + expected_regexp: _RegexLike, *, - msg: None | str = ..., + msg: str | None = None, ) -> unittest.case._AssertRaisesContext[_ET]: ... - -def decorate_methods( - cls: type[Any], - decorator: Callable[[Callable[..., Any]], Any], - testmatch: None | str | bytes | Pattern[Any] = ..., +@overload +def assert_raises_regex( + exception_class: _ExceptionSpec, + expected_regexp: _RegexLike, + callable: Callable[_Tss, Any], + *args: _Tss.args, + **kwargs: _Tss.kwargs, ) -> None: ... -def measure( - code_str: str | bytes | ast.mod | ast.AST, - times: int = ..., - label: None | str = ..., -) -> float: ... - +# @overload def assert_allclose( - actual: _ArrayLikeNumber_co | _ArrayLikeObject_co, - desired: _ArrayLikeNumber_co | _ArrayLikeObject_co, - rtol: float = ..., - atol: float = ..., - equal_nan: bool = ..., - err_msg: object = ..., - verbose: bool = ..., + actual: _ArrayLikeTD64_co, + desired: _ArrayLikeTD64_co, + rtol: float = 1e-7, + atol: float = 0, + equal_nan: bool = True, + err_msg: object = "", + verbose: bool = True, *, - strict: bool = ... + strict: bool = False, ) -> None: ... @overload def assert_allclose( - actual: _ArrayLikeTD64_co, - desired: _ArrayLikeTD64_co, - rtol: float = ..., - atol: float = ..., - equal_nan: bool = ..., - err_msg: object = ..., - verbose: bool = ..., + actual: _NumericArrayLike, + desired: _NumericArrayLike, + rtol: float = 1e-7, + atol: float = 0, + equal_nan: bool = True, + err_msg: object = "", + verbose: bool = True, *, - strict: bool = ... + strict: bool = False, ) -> None: ... +# def assert_array_almost_equal_nulp( x: _ArrayLikeNumber_co, y: _ArrayLikeNumber_co, - nulp: float = ..., + nulp: float = 1, ) -> None: ... 
+# def assert_array_max_ulp( a: _ArrayLikeNumber_co, b: _ArrayLikeNumber_co, - maxulp: float = ..., - dtype: DTypeLike = ..., + maxulp: float = 1, + dtype: DTypeLike | None = None, ) -> NDArray[Any]: ... +# @overload -def assert_warns(warning_class: type[Warning]) -> _GeneratorContextManager[None]: ... +def assert_warns(warning_class: _WarningSpec) -> _GeneratorContextManager[None]: ... @overload -def assert_warns( - warning_class: type[Warning], - func: Callable[_P, _T], - /, - *args: _P.args, - **kwargs: _P.kwargs, -) -> _T: ... +def assert_warns(warning_class: _WarningSpec, func: Callable[_Tss, _T], *args: _Tss.args, **kwargs: _Tss.kwargs) -> _T: ... +# @overload def assert_no_warnings() -> _GeneratorContextManager[None]: ... @overload -def assert_no_warnings( - func: Callable[_P, _T], - /, - *args: _P.args, - **kwargs: _P.kwargs, -) -> _T: ... +def assert_no_warnings(func: Callable[_Tss, _T], /, *args: _Tss.args, **kwargs: _Tss.kwargs) -> _T: ... +# +@overload +def assert_no_gc_cycles() -> _GeneratorContextManager[None]: ... +@overload +def assert_no_gc_cycles(func: Callable[_Tss, Any], /, *args: _Tss.args, **kwargs: _Tss.kwargs) -> None: ... + +### + +# @overload def tempdir( - suffix: None = ..., - prefix: None = ..., - dir: None = ..., + suffix: None = None, + prefix: None = None, + dir: None = None, ) -> _GeneratorContextManager[str]: ... @overload def tempdir( - suffix: AnyStr | None = ..., - prefix: AnyStr | None = ..., - dir: GenericPath[AnyStr] | None = ..., + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + *, + dir: GenericPath[AnyStr], +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def tempdir( + suffix: AnyStr | None = None, + *, + prefix: AnyStr, + dir: GenericPath[AnyStr] | None = None, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def tempdir( + suffix: AnyStr, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, ) -> _GeneratorContextManager[AnyStr]: ... 
+# @overload def temppath( - suffix: None = ..., - prefix: None = ..., - dir: None = ..., - text: bool = ..., + suffix: None = None, + prefix: None = None, + dir: None = None, + text: bool = False, ) -> _GeneratorContextManager[str]: ... @overload def temppath( - suffix: AnyStr | None = ..., - prefix: AnyStr | None = ..., - dir: GenericPath[AnyStr] | None = ..., - text: bool = ..., + suffix: AnyStr | None, + prefix: AnyStr | None, + dir: GenericPath[AnyStr], + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def temppath( + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + *, + dir: GenericPath[AnyStr], + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def temppath( + suffix: AnyStr | None, + prefix: AnyStr, + dir: GenericPath[AnyStr] | None = None, + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def temppath( + suffix: AnyStr | None = None, + *, + prefix: AnyStr, + dir: GenericPath[AnyStr] | None = None, + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def temppath( + suffix: AnyStr, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, + text: bool = False, ) -> _GeneratorContextManager[AnyStr]: ... +# +def check_support_sve(__cache: list[_T_or_bool] = []) -> _T_or_bool: ... # noqa: PYI063 + +# +def decorate_methods( + cls: type, + decorator: Callable[[Callable[..., Any]], Any], + testmatch: _RegexLike | None = None, +) -> None: ... + +# @overload -def assert_no_gc_cycles() -> _GeneratorContextManager[None]: ... +def run_threaded( + func: Callable[[], None], + max_workers: int = 8, + pass_count: bool = False, + pass_barrier: bool = False, + outer_iterations: int = 1, + prepare_args: None = None, +) -> None: ... 
@overload -def assert_no_gc_cycles( - func: Callable[_P, Any], - /, - *args: _P.args, - **kwargs: _P.kwargs, +def run_threaded( + func: Callable[[Unpack[_Ts]], None], + max_workers: int, + pass_count: bool, + pass_barrier: bool, + outer_iterations: int, + prepare_args: tuple[Unpack[_Ts]], +) -> None: ... +@overload +def run_threaded( + func: Callable[[Unpack[_Ts]], None], + max_workers: int = 8, + pass_count: bool = False, + pass_barrier: bool = False, + outer_iterations: int = 1, + *, + prepare_args: tuple[Unpack[_Ts]], ) -> None: ... +# +def runstring(astr: _StrLike | types.CodeType, dict: dict[str, Any] | None) -> Any: ... # noqa: ANN401 +def rundocs(filename: StrPath | None = None, raise_on_error: bool = True) -> None: ... +def measure(code_str: _StrLike | ast.AST, times: int = 1, label: str | None = None) -> float: ... def break_cycles() -> None: ... - -def run_threaded(func: Callable[[], None], iters: int, pass_count: bool = False) -> None: ... diff --git a/numpy/typing/tests/data/fail/testing.pyi b/numpy/typing/tests/data/fail/testing.pyi index 953670180203..f7eaa7d20836 100644 --- a/numpy/typing/tests/data/fail/testing.pyi +++ b/numpy/typing/tests/data/fail/testing.pyi @@ -3,7 +3,7 @@ import numpy.typing as npt AR_U: npt.NDArray[np.str_] -def func() -> bool: ... +def func(x: object) -> bool: ... 
np.testing.assert_(True, msg=1) # E: incompatible type np.testing.build_err_msg(1, "test") # E: incompatible type @@ -20,9 +20,9 @@ np.testing.assert_allclose(AR_U, AR_U) # E: incompatible type np.testing.assert_array_almost_equal_nulp(AR_U, AR_U) # E: incompatible type np.testing.assert_array_max_ulp(AR_U, AR_U) # E: incompatible type -np.testing.assert_warns(warning_class=RuntimeWarning, func=func) # E: No overload variant +np.testing.assert_warns(RuntimeWarning, func) # E: No overload variant np.testing.assert_no_warnings(func=func) # E: No overload variant -np.testing.assert_no_warnings(func, None) # E: Too many arguments -np.testing.assert_no_warnings(func, test=None) # E: No overload variant +np.testing.assert_no_warnings(func) # E: Too many arguments +np.testing.assert_no_warnings(func, y=None) # E: No overload variant np.testing.assert_no_gc_cycles(func=func) # E: No overload variant diff --git a/numpy/typing/tests/data/reveal/testing.pyi b/numpy/typing/tests/data/reveal/testing.pyi index 5301090a5f4b..741c71f62a5b 100644 --- a/numpy/typing/tests/data/reveal/testing.pyi +++ b/numpy/typing/tests/data/reveal/testing.pyi @@ -32,15 +32,15 @@ assert_type(np.testing.IgnoreException(), np.testing.IgnoreException) assert_type( np.testing.clear_and_catch_warnings(modules=[np.testing]), - np.testing._private.utils._clear_and_catch_warnings_without_records, + np.testing.clear_and_catch_warnings[None], ) assert_type( np.testing.clear_and_catch_warnings(True), - np.testing._private.utils._clear_and_catch_warnings_with_records, + np.testing.clear_and_catch_warnings[list[warnings.WarningMessage]], ) assert_type( np.testing.clear_and_catch_warnings(False), - np.testing._private.utils._clear_and_catch_warnings_without_records, + np.testing.clear_and_catch_warnings[None], ) assert_type( np.testing.clear_and_catch_warnings(bool_obj), From 110bb8e98d87ea291eb997a748f316bb286ad25b Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sun, 16 Mar 2025 00:51:34 +0100 Subject: 
[PATCH 148/187] TYP: fix typing errors in ``numpy.ndarray`` (#28540) Ported from numpy/numtype#200 --- numpy/__init__.pyi | 36 +++++-------------- numpy/typing/tests/data/fail/ndarray_misc.pyi | 2 -- 2 files changed, 8 insertions(+), 30 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 539c5fa53c24..e5c440bc8a79 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2231,11 +2231,9 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def resize(self, new_shape: _ShapeLike, /, *, refcheck: builtins.bool = ...) -> None: ... @overload - def resize(self, *new_shape: SupportsIndex, refcheck: builtins.bool = ...) -> None: ... + def resize(self, /, *new_shape: SupportsIndex, refcheck: builtins.bool = ...) -> None: ... - def setflags( - self, write: builtins.bool = ..., align: builtins.bool = ..., uic: builtins.bool = ... - ) -> None: ... + def setflags(self, write: builtins.bool = ..., align: builtins.bool = ..., uic: builtins.bool = ...) -> None: ... def squeeze( self, @@ -2381,13 +2379,6 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): sorter: None | _ArrayLikeInt_co = ..., ) -> NDArray[intp]: ... - def setfield( - self, - val: ArrayLike, - dtype: DTypeLike, - offset: SupportsIndex = ..., - ) -> None: ... - def sort( self, axis: SupportsIndex = ..., @@ -2567,23 +2558,14 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload # (dtype: ?, type: type[T]) def view(self, /, dtype: DTypeLike, type: type[_ArrayT]) -> _ArrayT: ... + def setfield(self, /, val: ArrayLike, dtype: DTypeLike, offset: CanIndex = 0) -> None: ... @overload - def getfield( - self, - dtype: _DTypeLike[_SCT], - offset: SupportsIndex = ... - ) -> NDArray[_SCT]: ... + def getfield(self, dtype: _DTypeLike[_SCT], offset: SupportsIndex = 0) -> NDArray[_SCT]: ... @overload - def getfield( - self, - dtype: DTypeLike, - offset: SupportsIndex = ... - ) -> NDArray[Any]: ... 
+ def getfield(self, dtype: DTypeLike, offset: SupportsIndex = 0) -> NDArray[Any]: ... - def __index__(self: NDArray[np.integer[Any]], /) -> int: ... - def __int__(self: NDArray[number[Any] | np.timedelta64 | np.bool | object_], /) -> int: ... - def __float__(self: NDArray[number[Any] | np.timedelta64 | np.bool | object_], /) -> float: ... - def __complex__(self: NDArray[number[Any] | np.bool | object_], /) -> complex: ... + def __index__(self: NDArray[integer], /) -> int: ... + def __complex__(self: NDArray[number | np.bool | object_], /) -> complex: ... def __len__(self) -> int: ... def __contains__(self, value: object, /) -> builtins.bool: ... @@ -2659,9 +2641,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): # @overload # def __abs__(self: ndarray[_ShapeType, dtype[complex128]], /) -> ndarray[_ShapeType, dtype[float64]]: ... @overload - def __abs__( - self: ndarray[_ShapeT, dtype[complexfloating[_AnyNBitInexact]]], / - ) -> ndarray[_ShapeT, dtype[floating[_AnyNBitInexact]]]: ... + def __abs__(self: ndarray[_ShapeT, dtype[complexfloating[_NBit]]], /) -> ndarray[_ShapeT, dtype[floating[_NBit]]]: ... @overload def __abs__(self: _RealArrayT, /) -> _RealArrayT: ... 
diff --git a/numpy/typing/tests/data/fail/ndarray_misc.pyi b/numpy/typing/tests/data/fail/ndarray_misc.pyi index 38729557b43e..489aefca7ffc 100644 --- a/numpy/typing/tests/data/fail/ndarray_misc.pyi +++ b/numpy/typing/tests/data/fail/ndarray_misc.pyi @@ -26,8 +26,6 @@ f8.setfield(2, np.float64) # E: has no attribute f8.sort() # E: has no attribute f8.trace() # E: has no attribute -AR_M.__int__() # E: Invalid self argument -AR_M.__float__() # E: Invalid self argument AR_M.__complex__() # E: Invalid self argument AR_b.__index__() # E: Invalid self argument From 18795c221083c69bd4950e570c8b849f45d7ce03 Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Sun, 16 Mar 2025 00:33:51 +0100 Subject: [PATCH 149/187] TYP: fix stubtest error in ``numpy.ma`` (#28541) Ported from numpy/numtype#280 --- numpy/ma/core.pyi | 84 ++++++++++++++++++++++++--------------------- numpy/ma/extras.pyi | 28 ++++++++------- 2 files changed, 61 insertions(+), 51 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 57136fa9d31c..83c0636ce4a7 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -1,19 +1,13 @@ -from collections.abc import Callable -from typing import Any, TypeVar - -from numpy import ( - amax, - amin, - bool_, - expand_dims, - clip, - indices, - squeeze, - angle, - ndarray, - dtype, - float64, -) +# pyright: reportIncompatibleMethodOverride=false +# ruff: noqa: ANN001, ANN002, ANN003, ANN201, ANN202 ANN204 + +from typing import Any, SupportsIndex, TypeVar + +from _typeshed import Incomplete +from typing_extensions import deprecated + +from numpy import _OrderKACF, amax, amin, bool_, dtype, expand_dims, float64, ndarray +from numpy._typing import ArrayLike, _DTypeLikeBool __all__ = [ "MAError", @@ -111,8 +105,8 @@ __all__ = [ "less", "less_equal", "log", - "log10", "log2", + "log10", "logical_and", "logical_not", "logical_or", @@ -257,6 +251,7 @@ cosh: _MaskedUnaryOperation tanh: _MaskedUnaryOperation abs: _MaskedUnaryOperation absolute: 
_MaskedUnaryOperation +angle: _MaskedUnaryOperation fabs: _MaskedUnaryOperation negative: _MaskedUnaryOperation floor: _MaskedUnaryOperation @@ -284,20 +279,21 @@ greater_equal: _MaskedBinaryOperation less: _MaskedBinaryOperation greater: _MaskedBinaryOperation logical_and: _MaskedBinaryOperation -alltrue: _MaskedBinaryOperation +def alltrue(target: ArrayLike, axis: SupportsIndex | None = 0, dtype: _DTypeLikeBool | None = None) -> Incomplete: ... logical_or: _MaskedBinaryOperation -sometrue: Callable[..., Any] +def sometrue(target: ArrayLike, axis: SupportsIndex | None = 0, dtype: _DTypeLikeBool | None = None) -> Incomplete: ... logical_xor: _MaskedBinaryOperation bitwise_and: _MaskedBinaryOperation bitwise_or: _MaskedBinaryOperation bitwise_xor: _MaskedBinaryOperation hypot: _MaskedBinaryOperation -divide: _MaskedBinaryOperation -true_divide: _MaskedBinaryOperation -floor_divide: _MaskedBinaryOperation -remainder: _MaskedBinaryOperation -fmod: _MaskedBinaryOperation -mod: _MaskedBinaryOperation + +divide: _DomainedBinaryOperation +true_divide: _DomainedBinaryOperation +floor_divide: _DomainedBinaryOperation +remainder: _DomainedBinaryOperation +fmod: _DomainedBinaryOperation +mod: _DomainedBinaryOperation def make_mask_descr(ndtype): ... def getmask(a): ... @@ -448,10 +444,10 @@ class MaskedArray(ndarray[_ShapeType_co, _DType_co]): def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... def round(self, decimals=..., out=...): ... - def argsort(self, axis=..., kind=..., order=..., endwith=..., fill_value=..., stable=...): ... + def argsort(self, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... def argmin(self, axis=..., fill_value=..., out=..., *, keepdims=...): ... def argmax(self, axis=..., fill_value=..., out=..., *, keepdims=...): ... - def sort(self, axis=..., kind=..., order=..., endwith=..., fill_value=..., stable=...): ... 
+ def sort(self, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... def min(self, axis=..., out=..., fill_value=..., keepdims=...): ... # NOTE: deprecated # def tostring(self, fill_value=..., order=...): ... @@ -460,6 +456,7 @@ class MaskedArray(ndarray[_ShapeType_co, _DType_co]): def partition(self, *args, **kwargs): ... def argpartition(self, *args, **kwargs): ... def take(self, indices, axis=..., out=..., mode=...): ... + copy: Any diagonal: Any flatten: Any @@ -468,19 +465,26 @@ class MaskedArray(ndarray[_ShapeType_co, _DType_co]): swapaxes: Any T: Any transpose: Any + @property # type: ignore[misc] def mT(self): ... - def tolist(self, fill_value=...): ... - def tobytes(self, fill_value=..., order=...): ... - def tofile(self, fid, sep=..., format=...): ... - def toflex(self): ... - torecords: Any + + # + def toflex(self) -> Incomplete: ... + def torecords(self) -> Incomplete: ... + def tolist(self, fill_value: Incomplete | None = None) -> Incomplete: ... + @deprecated("tostring() is deprecated. Use tobytes() instead.") + def tostring(self, /, fill_value: Incomplete | None = None, order: _OrderKACF = "C") -> bytes: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def tobytes(self, /, fill_value: Incomplete | None = None, order: _OrderKACF = "C") -> bytes: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def tofile(self, /, fid: Incomplete, sep: str = "", format: str = "%s") -> Incomplete: ... + + # def __reduce__(self): ... def __deepcopy__(self, memo=...): ... class mvoid(MaskedArray[_ShapeType_co, _DType_co]): def __new__( - self, + self, # pyright: ignore[reportSelfClsParameterName] data, mask=..., dtype=..., @@ -593,8 +597,8 @@ maximum: _extrema_operation def take(a, indices, axis=..., out=..., mode=...): ... def power(a, b, third=...): ... -def argsort(a, axis=..., kind=..., order=..., endwith=..., fill_value=..., stable=...): ... 
-def sort(a, axis=..., kind=..., order=..., endwith=..., fill_value=..., stable=...): ... +def argsort(a, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... +def sort(a, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... def compressed(x): ... def concatenate(arrays, axis=...): ... def diag(v, k=...): ... @@ -629,19 +633,21 @@ def asanyarray(a, dtype=...): ... def fromflex(fxarray): ... class _convert2ma: - __doc__: Any - def __init__(self, funcname, params=...): ... - def getdoc(self): ... - def __call__(self, *args, **params): ... + def __init__(self, /, funcname: str, np_ret: str, np_ma_ret: str, params: dict[str, Any] | None = None) -> None: ... + def __call__(self, /, *args: object, **params: object) -> Any: ... # noqa: ANN401 + def getdoc(self, /, np_ret: str, np_ma_ret: str) -> str | None: ... arange: _convert2ma +clip: _convert2ma empty: _convert2ma empty_like: _convert2ma frombuffer: _convert2ma fromfunction: _convert2ma identity: _convert2ma +indices: _convert2ma ones: _convert2ma ones_like: _convert2ma +squeeze: _convert2ma zeros: _convert2ma zeros_like: _convert2ma diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 580309cc679d..ba76f3517526 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -1,9 +1,10 @@ -from typing import Any +from _typeshed import Incomplete +import numpy as np from numpy.lib._function_base_impl import average from numpy.lib._index_tricks_impl import AxisConcatenator -from .core import dot, mask_rowcols +from .core import MaskedArray, dot __all__ = [ "apply_along_axis", @@ -19,8 +20,8 @@ __all__ = [ "compress_nd", "compress_rowcols", "compress_rows", - "count_masked", "corrcoef", + "count_masked", "cov", "diagflat", "dot", @@ -30,9 +31,9 @@ __all__ = [ "flatnotmasked_edges", "hsplit", "hstack", - "isin", "in1d", "intersect1d", + "isin", "mask_cols", "mask_rowcols", "mask_rows", @@ -48,8 +49,8 @@ __all__ = [ "setdiff1d", "setxor1d", "stack", - "unique", 
"union1d", + "unique", "vander", "vstack", ] @@ -59,9 +60,9 @@ def masked_all(shape, dtype = ...): ... def masked_all_like(arr): ... class _fromnxfunction: - __name__: Any - __doc__: Any - def __init__(self, funcname): ... + __name__: Incomplete + __doc__: Incomplete + def __init__(self, funcname) -> None: ... def getdoc(self): ... def __call__(self, *args, **params): ... @@ -109,13 +110,13 @@ def cov(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ... def corrcoef(x, y=..., rowvar=..., bias = ..., allow_masked=..., ddof = ...): ... class MAxisConcatenator(AxisConcatenator): - concatenate: Any + @staticmethod + def concatenate(arrays: Incomplete, axis: int = 0) -> Incomplete: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] @classmethod - def makemat(cls, arr): ... - def __getitem__(self, key): ... + def makemat(cls, arr: Incomplete) -> Incomplete: ... # type: ignore[override] # pyright: ignore[reportIncompatibleVariableOverride] class mr_class(MAxisConcatenator): - def __init__(self): ... + def __init__(self) -> None: ... mr_: mr_class @@ -128,3 +129,6 @@ def clump_unmasked(a): ... def clump_masked(a): ... def vander(x, n=...): ... def polyfit(x, y, deg, rcond=..., full=..., w=..., cov=...): ... + +# +def mask_rowcols(a: Incomplete, axis: Incomplete | None = None) -> MaskedArray[Incomplete, np.dtype[Incomplete]]: ... 
From 6f94b154ac44b1ee0d313cd510a8da6ce25941b1 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 16 Mar 2025 07:16:09 -0600 Subject: [PATCH 150/187] REL: Prepare for the NumPy 2.2.4 release [wheel build] - Create 2.2.4-changelog.rst - Update 2.2.4-notes.rst --- doc/changelog/2.2.4-changelog.rst | 45 +++++++++++++++++++++ doc/source/release/2.2.4-notes.rst | 64 ++++++++++++++++++++++++------ 2 files changed, 96 insertions(+), 13 deletions(-) create mode 100644 doc/changelog/2.2.4-changelog.rst diff --git a/doc/changelog/2.2.4-changelog.rst b/doc/changelog/2.2.4-changelog.rst new file mode 100644 index 000000000000..1e2664ebde48 --- /dev/null +++ b/doc/changelog/2.2.4-changelog.rst @@ -0,0 +1,45 @@ + +Contributors +============ + +A total of 15 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Abhishek Kumar +* Andrej Zhilenkov +* Andrew Nelson +* Charles Harris +* Giovanni Del Monte +* Guan Ming(Wesley) Chiu + +* Jonathan Albrecht + +* Joren Hammudoglu +* Mark Harfouche +* Matthieu Darbois +* Nathan Goldbaum +* Pieter Eendebak +* Sebastian Berg +* Tyler Reddy +* lvllvl + + +Pull requests merged +==================== + +A total of 17 pull requests were merged for this release. + +* `#28333 `__: MAINT: Prepare 2.2.x for further development. +* `#28348 `__: TYP: fix positional- and keyword-only params in astype, cross... 
+* `#28377 `__: MAINT: Update FreeBSD version and fix test failure +* `#28379 `__: BUG: numpy.loadtxt reads only 50000 lines when skip_rows >= max_rows +* `#28385 `__: BUG: Make np.nonzero threading safe +* `#28420 `__: BUG: safer bincount casting (backport to 2.2.x) +* `#28422 `__: BUG: Fix building on s390x with clang +* `#28423 `__: CI: use QEMU 9.2.2 for Linux Qemu tests +* `#28424 `__: BUG: skip legacy dtype multithreaded test on 32 bit runners +* `#28435 `__: BUG: Fix searchsorted and CheckFromAny byte-swapping logic +* `#28449 `__: BUG: sanity check ``__array_interface__`` number of dimensions +* `#28510 `__: MAINT: Hide decorator from pytest traceback +* `#28512 `__: TYP: Typing fixes backported from #28452, #28491, #28494 +* `#28521 `__: TYP: Backport fixes from #28505, #28506, #28508, and #28511 +* `#28533 `__: TYP: Backport typing fixes from main (2) +* `#28534 `__: TYP: Backport typing fixes from main (3) +* `#28542 `__: TYP: Backport typing fixes from main (4) diff --git a/doc/source/release/2.2.4-notes.rst b/doc/source/release/2.2.4-notes.rst index 9cde851301f9..8542c98a8af9 100644 --- a/doc/source/release/2.2.4-notes.rst +++ b/doc/source/release/2.2.4-notes.rst @@ -5,16 +5,54 @@ NumPy 2.2.4 Release Notes ========================== NumPy 2.2.4 is a patch release that fixes bugs found after the 2.2.3 release. - -Highlights -========== - -*We'll choose highlights for this release near the end of the release cycle.* - - -.. if release snippets have been incorporated already, uncomment the follow - line (leave the `.. include:: directive) - -.. **Content from release note snippets in doc/release/upcoming_changes:** - -.. include:: notes-towncrier.rst +There are a large number of typing improvements, the rest of the changes are +the usual mix of bugfixes and platform maintenace. + +This release supports Python versions 3.10-3.13. + + +Contributors +============ + +A total of 15 people contributed to this release. 
People with a "+" by their +names contributed a patch for the first time. + +* Abhishek Kumar +* Andrej Zhilenkov +* Andrew Nelson +* Charles Harris +* Giovanni Del Monte +* Guan Ming(Wesley) Chiu + +* Jonathan Albrecht + +* Joren Hammudoglu +* Mark Harfouche +* Matthieu Darbois +* Nathan Goldbaum +* Pieter Eendebak +* Sebastian Berg +* Tyler Reddy +* lvllvl + + + +Pull requests merged +==================== + +A total of 17 pull requests were merged for this release. + +* `#28333 `__: MAINT: Prepare 2.2.x for further development. +* `#28348 `__: TYP: fix positional- and keyword-only params in astype, cross... +* `#28377 `__: MAINT: Update FreeBSD version and fix test failure +* `#28379 `__: BUG: numpy.loadtxt reads only 50000 lines when skip_rows >= max_rows +* `#28385 `__: BUG: Make np.nonzero threading safe +* `#28420 `__: BUG: safer bincount casting (backport to 2.2.x) +* `#28422 `__: BUG: Fix building on s390x with clang +* `#28423 `__: CI: use QEMU 9.2.2 for Linux Qemu tests +* `#28424 `__: BUG: skip legacy dtype multithreaded test on 32 bit runners +* `#28435 `__: BUG: Fix searchsorted and CheckFromAny byte-swapping logic +* `#28449 `__: BUG: sanity check ``__array_interface__`` number of dimensions +* `#28510 `__: MAINT: Hide decorator from pytest traceback +* `#28512 `__: TYP: Typing fixes backported from #28452, #28491, #28494 +* `#28521 `__: TYP: Backport fixes from #28505, #28506, #28508, and #28511 +* `#28533 `__: TYP: Backport typing fixes from main (2) +* `#28534 `__: TYP: Backport typing fixes from main (3) +* `#28542 `__: TYP: Backport typing fixes from main (4) From b50f579b4e910af08b552373737ab559ce7a23d1 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sun, 16 Mar 2025 12:42:18 -0600 Subject: [PATCH 151/187] MAINT: Prepare 2.2.x for further development - Create 2.2.5-notes.rst - Update release.rst - Update pavement - Update pyproject.toml [skip azp] [skip cirrus] [skip actions] --- doc/source/release.rst | 1 + 
doc/source/release/2.2.5-notes.rst | 10 ++++++++++ pavement.py | 2 +- pyproject.toml | 2 +- 4 files changed, 13 insertions(+), 2 deletions(-) create mode 100644 doc/source/release/2.2.5-notes.rst diff --git a/doc/source/release.rst b/doc/source/release.rst index 13413f3a1b83..c9d9b3bf36e7 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,7 @@ Release notes .. toctree:: :maxdepth: 2 + 2.2.5 2.2.4 2.2.3 2.2.2 diff --git a/doc/source/release/2.2.5-notes.rst b/doc/source/release/2.2.5-notes.rst new file mode 100644 index 000000000000..3fa8dc85abe6 --- /dev/null +++ b/doc/source/release/2.2.5-notes.rst @@ -0,0 +1,10 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.2.5 Release Notes +========================== + +NumPy 2.2.5 is a patch release that fixes bugs found after the 2.2.4 release. + +This release supports Python versions 3.10-3.13. + diff --git a/pavement.py b/pavement.py index e3e778d4bbfc..4caf4ba11a23 100644 --- a/pavement.py +++ b/pavement.py @@ -36,7 +36,7 @@ #----------------------------------- # Path to the release notes -RELEASE_NOTES = 'doc/source/release/2.2.4-notes.rst' +RELEASE_NOTES = 'doc/source/release/2.2.5-notes.rst' #------------------------------------------------------- diff --git a/pyproject.toml b/pyproject.toml index 3d9889d2eeed..a9223b22b326 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.2.4" +version = "2.2.5" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"} From e08ea56f4ea7e29cdc7723672df6885d5c44f4ed Mon Sep 17 00:00:00 2001 From: bgopi23 Date: Sat, 8 Mar 2025 20:15:54 +0800 Subject: [PATCH 152/187] FIX: Correct return type of NpyIter_GetIterNext in Cython declarations (#28446) --- numpy/__init__.cython-30.pxd | 2 +- numpy/__init__.pxd | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.cython-30.pxd b/numpy/__init__.cython-30.pxd 
index e35cef5fa1a8..54af0e1f8319 100644 --- a/numpy/__init__.cython-30.pxd +++ b/numpy/__init__.cython-30.pxd @@ -1230,7 +1230,7 @@ cdef extern from "numpy/arrayobject.h": npy_intp* outstrides) except NPY_FAIL npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil # functions for iterating an NpyIter object - NpyIter_IterNextFunc* NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL + NpyIter_IterNextFunc NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL NpyIter_GetMultiIndexFunc* NpyIter_GetGetMultiIndex(NpyIter* it, char** errmsg) except NULL char** NpyIter_GetDataPtrArray(NpyIter* it) nogil diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd index 89fe913b9cd3..0822e9c129e5 100644 --- a/numpy/__init__.pxd +++ b/numpy/__init__.pxd @@ -1145,7 +1145,7 @@ cdef extern from "numpy/arrayobject.h": npy_intp* outstrides) except NPY_FAIL npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil # functions for iterating an NpyIter object - NpyIter_IterNextFunc* NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL + NpyIter_IterNextFunc NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL NpyIter_GetMultiIndexFunc* NpyIter_GetGetMultiIndex(NpyIter* it, char** errmsg) except NULL char** NpyIter_GetDataPtrArray(NpyIter* it) nogil From 48e3a43402c8a3c6e0d5b4761d1fbd197d75bd09 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 14 Mar 2025 11:44:29 -0600 Subject: [PATCH 153/187] MAINT: work around cython limitations, add test --- numpy/__init__.cython-30.pxd | 19 +++++++++++++------ numpy/__init__.pxd | 16 +++++++++++----- numpy/_core/tests/examples/cython/checks.pyx | 9 +++++++++ numpy/_core/tests/test_cython.py | 1 + 4 files changed, 34 insertions(+), 11 deletions(-) diff --git a/numpy/__init__.cython-30.pxd b/numpy/__init__.cython-30.pxd index 54af0e1f8319..0728aad4829f 100644 --- a/numpy/__init__.cython-30.pxd +++ b/numpy/__init__.cython-30.pxd @@ -858,6 +858,14 @@ cdef extern from "numpy/ndarraytypes.h": int64_t year int32_t month, 
day, hour, min, sec, us, ps, as + # Iterator API added in v1.6 + # + # These don't match the definition in the C API because Cython can't wrap + # function pointers that return functions. + # https://github.com/cython/cython/issues/6720 + ctypedef int (*NpyIter_IterNextFunc "NpyIter_IterNextFunc *")(NpyIter* it) noexcept nogil + ctypedef void (*NpyIter_GetMultiIndexFunc "NpyIter_GetMultiIndexFunc *")(NpyIter* it, npy_intp* outcoords) noexcept nogil + cdef extern from "numpy/arrayscalars.h": @@ -1109,10 +1117,6 @@ cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) noexcept nogil: return (obj).obmeta.base -# Iterator API added in v1.6 -ctypedef int (*NpyIter_IterNextFunc)(NpyIter* it) noexcept nogil -ctypedef void (*NpyIter_GetMultiIndexFunc)(NpyIter* it, npy_intp* outcoords) noexcept nogil - cdef extern from "numpy/arrayobject.h": ctypedef struct NpyIter: @@ -1230,9 +1234,12 @@ cdef extern from "numpy/arrayobject.h": npy_intp* outstrides) except NPY_FAIL npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil # functions for iterating an NpyIter object + # + # These don't match the definition in the C API because Cython can't wrap + # function pointers that return functions. 
NpyIter_IterNextFunc NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL - NpyIter_GetMultiIndexFunc* NpyIter_GetGetMultiIndex(NpyIter* it, - char** errmsg) except NULL + NpyIter_GetMultiIndexFunc NpyIter_GetGetMultiIndex(NpyIter* it, + char** errmsg) except NULL char** NpyIter_GetDataPtrArray(NpyIter* it) nogil char** NpyIter_GetInitialDataPtrArray(NpyIter* it) nogil npy_intp* NpyIter_GetIndexPtr(NpyIter* it) diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd index 0822e9c129e5..6a62a3820042 100644 --- a/numpy/__init__.pxd +++ b/numpy/__init__.pxd @@ -773,6 +773,13 @@ cdef extern from "numpy/ndarraytypes.h": int64_t year int32_t month, day, hour, min, sec, us, ps, as + # Iterator API added in v1.6 + # + # These don't match the definition in the C API because Cython can't wrap + # function pointers that return functions. + # https://github.com/cython/cython/issues/6720 + ctypedef int (*NpyIter_IterNextFunc "NpyIter_IterNextFunc *")(NpyIter* it) noexcept nogil + ctypedef void (*NpyIter_GetMultiIndexFunc "NpyIter_GetMultiIndexFunc *")(NpyIter* it, npy_intp* outcoords) noexcept nogil cdef extern from "numpy/arrayscalars.h": @@ -1024,10 +1031,6 @@ cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: return (obj).obmeta.base -# Iterator API added in v1.6 -ctypedef int (*NpyIter_IterNextFunc)(NpyIter* it) noexcept nogil -ctypedef void (*NpyIter_GetMultiIndexFunc)(NpyIter* it, npy_intp* outcoords) noexcept nogil - cdef extern from "numpy/arrayobject.h": ctypedef struct NpyIter: @@ -1145,7 +1148,10 @@ cdef extern from "numpy/arrayobject.h": npy_intp* outstrides) except NPY_FAIL npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil # functions for iterating an NpyIter object - NpyIter_IterNextFunc NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL + # + # These don't match the definition in the C API because Cython can't wrap + # function pointers that return functions. 
+ NpyIter_IterNextFunc* NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL NpyIter_GetMultiIndexFunc* NpyIter_GetGetMultiIndex(NpyIter* it, char** errmsg) except NULL char** NpyIter_GetDataPtrArray(NpyIter* it) nogil diff --git a/numpy/_core/tests/examples/cython/checks.pyx b/numpy/_core/tests/examples/cython/checks.pyx index 34359fb42fcb..028dc6a6c9e4 100644 --- a/numpy/_core/tests/examples/cython/checks.pyx +++ b/numpy/_core/tests/examples/cython/checks.pyx @@ -242,6 +242,15 @@ def npyiter_has_multi_index(it: "nditer"): return result +def test_get_multi_index_iter_next(it: "nditer", cnp.ndarray[cnp.float64_t, ndim=2] arr): + cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it) + cdef cnp.NpyIter_GetMultiIndexFunc get_multi_index = \ + cnp.NpyIter_GetGetMultiIndex(cit, NULL) + cdef cnp.NpyIter_IterNextFunc iternext = \ + cnp.NpyIter_GetIterNext(cit, NULL) + return 1 + + def npyiter_has_finished(it: "nditer"): cdef cnp.NpyIter* cit try: diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index d7fe28a8f053..ac29a2f7407b 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -267,6 +267,7 @@ def test_npyiter_api(install_temp): assert checks.get_npyiter_size(it) == it.itersize == np.prod(arr.shape) assert checks.npyiter_has_multi_index(it) == it.has_multi_index == True assert checks.get_npyiter_ndim(it) == it.ndim == 2 + assert checks.test_get_multi_index_iter_next(it, arr) arr2 = np.random.rand(2, 1, 2) it = np.nditer([arr, arr2]) From 37985a2a900a304ab102fdd692f4ad3920fd797e Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Mon, 24 Mar 2025 09:41:40 -0600 Subject: [PATCH 154/187] BUG: avoid deadlocks with C++ shared mutex in dispatch cache --- numpy/_core/src/umath/dispatching.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/numpy/_core/src/umath/dispatching.cpp b/numpy/_core/src/umath/dispatching.cpp index 87b16cc176b8..1bbdc4adb7d1 100644 --- a/numpy/_core/src/umath/dispatching.cpp 
+++ b/numpy/_core/src/umath/dispatching.cpp @@ -912,7 +912,9 @@ promote_and_get_info_and_ufuncimpl_with_locking( npy_bool legacy_promotion_is_possible) { std::shared_mutex *mutex = ((std::shared_mutex *)((PyArrayIdentityHash *)ufunc->_dispatch_cache)->mutex); + NPY_BEGIN_ALLOW_THREADS mutex->lock_shared(); + NPY_END_ALLOW_THREADS PyObject *info = PyArrayIdentityHash_GetItem( (PyArrayIdentityHash *)ufunc->_dispatch_cache, (PyObject **)op_dtypes); @@ -926,7 +928,9 @@ promote_and_get_info_and_ufuncimpl_with_locking( // cache miss, need to acquire a write lock and recursively calculate the // correct dispatch resolution + NPY_BEGIN_ALLOW_THREADS mutex->lock(); + NPY_END_ALLOW_THREADS info = promote_and_get_info_and_ufuncimpl(ufunc, ops, signature, op_dtypes, legacy_promotion_is_possible); mutex->unlock(); From f514d1de2bd5265ea6328d9c384f1ab7d0ba58dd Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 13 Mar 2025 03:57:41 +0100 Subject: [PATCH 155/187] TYP: fix typing errors in `_core.strings` Backports numpy/numtype#115 --- numpy/_core/strings.pyi | 158 ++++++++++++--------- numpy/typing/tests/data/fail/strings.pyi | 5 - numpy/typing/tests/data/reveal/strings.pyi | 10 +- 3 files changed, 95 insertions(+), 78 deletions(-) diff --git a/numpy/_core/strings.pyi b/numpy/_core/strings.pyi index b6c15b5c3ca3..a1ed1ff2b9a5 100644 --- a/numpy/_core/strings.pyi +++ b/numpy/_core/strings.pyi @@ -12,10 +12,57 @@ from numpy._typing import ( _SupportsArray, ) +__all__ = [ + "add", + "capitalize", + "center", + "count", + "decode", + "encode", + "endswith", + "equal", + "expandtabs", + "find", + "greater", + "greater_equal", + "index", + "isalnum", + "isalpha", + "isdecimal", + "isdigit", + "islower", + "isnumeric", + "isspace", + "istitle", + "isupper", + "less", + "less_equal", + "ljust", + "lower", + "lstrip", + "mod", + "multiply", + "not_equal", + "partition", + "replace", + "rfind", + "rindex", + "rjust", + "rpartition", + "rstrip", + "startswith", + "str_len", + "strip", + 
"swapcase", + "title", + "translate", + "upper", + "zfill", +] _StringDTypeArray: TypeAlias = np.ndarray[_Shape, np.dtypes.StringDType] _StringDTypeSupportsArray: TypeAlias = _SupportsArray[np.dtypes.StringDType] -_StringDTypeOrUnicodeArray: TypeAlias = np.ndarray[_Shape, np.dtype[np.str_]] | np.ndarray[_Shape, np.dtypes.StringDType] +_StringDTypeOrUnicodeArray: TypeAlias = np.ndarray[_Shape, np.dtype[np.str_]] | _StringDTypeArray @overload def equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... @@ -66,7 +113,7 @@ def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ... @overload def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ... @overload -def add(x1: T_co, T_co) -> _StringDTypeOrUnicodeArray: ... +def add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ... @overload def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ... @@ -78,13 +125,13 @@ def multiply(a: _StringDTypeSupportsArray, i: i_co) -> _StringDTypeArray: ... def multiply(a: T_co, i: i_co) -> _StringDTypeOrUnicodeArray: ... @overload -def mod(a: U_co, value: Any) -> NDArray[np.str_]: ... +def mod(a: U_co, value: object) -> NDArray[np.str_]: ... @overload -def mod(a: S_co, value: Any) -> NDArray[np.bytes_]: ... +def mod(a: S_co, value: object) -> NDArray[np.bytes_]: ... @overload -def mod(a: _StringDTypeSupportsArray, value: Any) -> _StringDTypeArray: ... +def mod(a: _StringDTypeSupportsArray, value: object) -> _StringDTypeArray: ... @overload -def mod(a: T_co, value: Any) -> _StringDTypeOrUnicodeArray: ... +def mod(a: T_co, value: object) -> _StringDTypeOrUnicodeArray: ... def isalpha(x: UST_co) -> NDArray[np.bool]: ... def isalnum(a: UST_co) -> NDArray[np.bool]: ... @@ -147,14 +194,14 @@ def index( a: U_co, sub: U_co, start: i_co = ..., - end: None | i_co = ..., + end: i_co | None = ..., ) -> NDArray[np.int_]: ... @overload def index( a: S_co, sub: S_co, start: i_co = ..., - end: None | i_co = ..., + end: i_co | None = ..., ) -> NDArray[np.int_]: ... 
@overload def index( @@ -169,14 +216,14 @@ def rindex( a: U_co, sub: U_co, start: i_co = ..., - end: None | i_co = ..., + end: i_co | None = ..., ) -> NDArray[np.int_]: ... @overload def rindex( a: S_co, sub: S_co, start: i_co = ..., - end: None | i_co = ..., + end: i_co | None = ..., ) -> NDArray[np.int_]: ... @overload def rindex( @@ -225,7 +272,7 @@ def startswith( @overload def startswith( a: T_co, - suffix: T_co, + prefix: T_co, start: i_co = ..., end: i_co | None = ..., ) -> NDArray[np.bool]: ... @@ -254,13 +301,13 @@ def endswith( def decode( a: S_co, - encoding: None | str = ..., - errors: None | str = ..., + encoding: str | None = None, + errors: str | None = None, ) -> NDArray[np.str_]: ... def encode( a: U_co | T_co, - encoding: None | str = ..., - errors: None | str = ..., + encoding: str | None = None, + errors: str | None = None, ) -> NDArray[np.bytes_]: ... @overload @@ -273,74 +320,58 @@ def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = ...) -> _StringDTyp def expandtabs(a: T_co, tabsize: i_co = ...) -> _StringDTypeOrUnicodeArray: ... @overload -def center(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[np.str_]: ... +def center(a: U_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.str_]: ... @overload -def center(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[np.bytes_]: ... +def center(a: S_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.bytes_]: ... @overload -def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: UST_co = " ") -> _StringDTypeArray: ... @overload -def center(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ... +def center(a: T_co, width: i_co, fillchar: UST_co = " ") -> _StringDTypeOrUnicodeArray: ... @overload -def ljust(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[np.str_]: ... 
+def ljust(a: U_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.str_]: ... @overload -def ljust(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[np.bytes_]: ... +def ljust(a: S_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.bytes_]: ... @overload -def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: UST_co = " ") -> _StringDTypeArray: ... @overload -def ljust(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ... +def ljust(a: T_co, width: i_co, fillchar: UST_co = " ") -> _StringDTypeOrUnicodeArray: ... @overload -def rjust( - a: U_co, - width: i_co, - fillchar: U_co = ..., -) -> NDArray[np.str_]: ... +def rjust(a: U_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.str_]: ... @overload -def rjust( - a: S_co, - width: i_co, - fillchar: S_co = ..., -) -> NDArray[np.bytes_]: ... +def rjust(a: S_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.bytes_]: ... @overload -def rjust( - a: _StringDTypeSupportsArray, - width: i_co, - fillchar: _StringDTypeSupportsArray = ..., -) -> _StringDTypeArray: ... +def rjust(a: _StringDTypeSupportsArray, width: i_co, fillchar: UST_co = " ") -> _StringDTypeArray: ... @overload -def rjust( - a: T_co, - width: i_co, - fillchar: T_co = ..., -) -> _StringDTypeOrUnicodeArray: ... +def rjust(a: T_co, width: i_co, fillchar: UST_co = " ") -> _StringDTypeOrUnicodeArray: ... @overload -def lstrip(a: U_co, chars: None | U_co = ...) -> NDArray[np.str_]: ... +def lstrip(a: U_co, chars: U_co | None = None) -> NDArray[np.str_]: ... @overload -def lstrip(a: S_co, chars: None | S_co = ...) -> NDArray[np.bytes_]: ... +def lstrip(a: S_co, chars: S_co | None = None) -> NDArray[np.bytes_]: ... @overload -def lstrip(a: _StringDTypeSupportsArray, chars: None | _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... 
+def lstrip(a: _StringDTypeSupportsArray, chars: T_co | None = None) -> _StringDTypeArray: ... @overload -def lstrip(a: T_co, chars: None | T_co = ...) -> _StringDTypeOrUnicodeArray: ... +def lstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... @overload -def rstrip(a: U_co, char: None | U_co = ...) -> NDArray[np.str_]: ... +def rstrip(a: U_co, chars: U_co | None = None) -> NDArray[np.str_]: ... @overload -def rstrip(a: S_co, char: None | S_co = ...) -> NDArray[np.bytes_]: ... +def rstrip(a: S_co, chars: S_co | None = None) -> NDArray[np.bytes_]: ... @overload -def rstrip(a: _StringDTypeSupportsArray, chars: None | _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +def rstrip(a: _StringDTypeSupportsArray, chars: T_co | None = None) -> _StringDTypeArray: ... @overload -def rstrip(a: T_co, chars: None | T_co = ...) -> _StringDTypeOrUnicodeArray: ... +def rstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... @overload -def strip(a: U_co, chars: None | U_co = ...) -> NDArray[np.str_]: ... +def strip(a: U_co, chars: U_co | None = None) -> NDArray[np.str_]: ... @overload -def strip(a: S_co, chars: None | S_co = ...) -> NDArray[np.bytes_]: ... +def strip(a: S_co, chars: S_co | None = None) -> NDArray[np.bytes_]: ... @overload -def strip(a: _StringDTypeSupportsArray, chars: None | _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +def strip(a: _StringDTypeSupportsArray, chars: T_co | None = None) -> _StringDTypeArray: ... @overload -def strip(a: T_co, chars: None | T_co = ...) -> _StringDTypeOrUnicodeArray: ... +def strip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... @overload def zfill(a: U_co, width: i_co) -> NDArray[np.str_]: ... @@ -425,15 +456,6 @@ def replace( count: i_co = ..., ) -> _StringDTypeOrUnicodeArray: ... -@overload -def join(sep: U_co, seq: U_co) -> NDArray[np.str_]: ... -@overload -def join(sep: S_co, seq: S_co) -> NDArray[np.bytes_]: ... 
-@overload -def join(sep: _StringDTypeSupportsArray, seq: _StringDTypeSupportsArray) -> _StringDTypeArray: ... -@overload -def join(sep: T_co, seq: T_co) -> _StringDTypeOrUnicodeArray: ... - @overload def partition(a: U_co, sep: U_co) -> NDArray[np.str_]: ... @overload @@ -456,23 +478,23 @@ def rpartition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... def translate( a: U_co, table: str, - deletechars: None | str = ..., + deletechars: str | None = None, ) -> NDArray[np.str_]: ... @overload def translate( a: S_co, table: str, - deletechars: None | str = ..., + deletechars: str | None = None, ) -> NDArray[np.bytes_]: ... @overload def translate( a: _StringDTypeSupportsArray, table: str, - deletechars: None | str = ..., + deletechars: str | None = None, ) -> _StringDTypeArray: ... @overload def translate( a: T_co, table: str, - deletechars: None | str = ..., + deletechars: str | None = None, ) -> _StringDTypeOrUnicodeArray: ... diff --git a/numpy/typing/tests/data/fail/strings.pyi b/numpy/typing/tests/data/fail/strings.pyi index e284501c9d67..25c3c2ecc0d7 100644 --- a/numpy/typing/tests/data/fail/strings.pyi +++ b/numpy/typing/tests/data/fail/strings.pyi @@ -22,11 +22,6 @@ np.strings.decode(AR_U) # E: incompatible type np.strings.join(AR_U, b"_") # E: incompatible type np.strings.join(AR_S, "_") # E: incompatible type -np.strings.ljust(AR_U, 5, fillchar=b"a") # E: incompatible type -np.strings.ljust(AR_S, 5, fillchar="a") # E: incompatible type -np.strings.rjust(AR_U, 5, fillchar=b"a") # E: incompatible type -np.strings.rjust(AR_S, 5, fillchar="a") # E: incompatible type - np.strings.lstrip(AR_U, b"a") # E: incompatible type np.strings.lstrip(AR_S, "a") # E: incompatible type np.strings.strip(AR_U, b"a") # E: incompatible type diff --git a/numpy/typing/tests/data/reveal/strings.pyi b/numpy/typing/tests/data/reveal/strings.pyi index 649902f0c6d3..9339456b61ae 100644 --- a/numpy/typing/tests/data/reveal/strings.pyi +++ b/numpy/typing/tests/data/reveal/strings.pyi 
@@ -67,27 +67,27 @@ assert_type(np.strings.expandtabs(AR_T), AR_T_alias) assert_type(np.strings.ljust(AR_U, 5), npt.NDArray[np.str_]) assert_type(np.strings.ljust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) assert_type(np.strings.ljust(AR_T, 5), AR_T_alias) -assert_type(np.strings.ljust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_TU_alias) +assert_type(np.strings.ljust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_T_alias) assert_type(np.strings.rjust(AR_U, 5), npt.NDArray[np.str_]) assert_type(np.strings.rjust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) assert_type(np.strings.rjust(AR_T, 5), AR_T_alias) -assert_type(np.strings.rjust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_TU_alias) +assert_type(np.strings.rjust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_T_alias) assert_type(np.strings.lstrip(AR_U), npt.NDArray[np.str_]) assert_type(np.strings.lstrip(AR_S, b"_"), npt.NDArray[np.bytes_]) assert_type(np.strings.lstrip(AR_T), AR_T_alias) -assert_type(np.strings.lstrip(AR_T, "_"), AR_TU_alias) +assert_type(np.strings.lstrip(AR_T, "_"), AR_T_alias) assert_type(np.strings.rstrip(AR_U), npt.NDArray[np.str_]) assert_type(np.strings.rstrip(AR_S, b"_"), npt.NDArray[np.bytes_]) assert_type(np.strings.rstrip(AR_T), AR_T_alias) -assert_type(np.strings.rstrip(AR_T, "_"), AR_TU_alias) +assert_type(np.strings.rstrip(AR_T, "_"), AR_T_alias) assert_type(np.strings.strip(AR_U), npt.NDArray[np.str_]) assert_type(np.strings.strip(AR_S, b"_"), npt.NDArray[np.bytes_]) assert_type(np.strings.strip(AR_T), AR_T_alias) -assert_type(np.strings.strip(AR_T, "_"), AR_TU_alias) +assert_type(np.strings.strip(AR_T, "_"), AR_T_alias) assert_type(np.strings.count(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) assert_type(np.strings.count(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) From 79624a8bc2aa9db8d12f0286e2b89358c0c439b6 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Tue, 1 Apr 2025 13:58:43 -0600 Subject: 
[PATCH 156/187] CI: Update Ubuntu to 22.04 in azure-pipelines Azure-pipelines will be removing support for Ubuntu-20.04. There remain uses of Ubuntu-20.04 in github actions, we may want to update those also, one use is to test against Python 3.6, which seems rather old to worry about. [skip cirrus] [skip actions] --- azure-pipelines.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 2393a96d3f86..81bada011c31 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -19,7 +19,7 @@ stages: jobs: - job: Skip pool: - vmImage: 'ubuntu-20.04' + vmImage: 'ubuntu-22.04' variables: DECODE_PERCENTS: 'false' RET: 'true' @@ -40,7 +40,7 @@ stages: - job: Lint condition: and(succeeded(), eq(variables['Build.Reason'], 'PullRequest')) pool: - vmImage: 'ubuntu-20.04' + vmImage: 'ubuntu-22.04' steps: - task: UsePythonVersion@0 inputs: @@ -59,7 +59,7 @@ stages: - job: Linux_Python_310_32bit_full_with_asserts pool: - vmImage: 'ubuntu-20.04' + vmImage: 'ubuntu-22.04' steps: - script: | git submodule update --init From 9dadd451d51598ec2d27746e349672f91031a7b2 Mon Sep 17 00:00:00 2001 From: karl Date: Thu, 27 Mar 2025 21:35:38 -0400 Subject: [PATCH 157/187] BUG: Set writeable flag for writeable dlpacks. Explicitly set the writeable flag in from_dlpack as the inverse of the dlpack read_only flag. Previously it was not actually being set. Additionally, update the readonly logic such that legacy unversioned DLPacks are never writeable, for compatibility with old behavior. 
Fixes #28599 --- numpy/_core/_add_newdocs.py | 4 ++-- numpy/_core/src/multiarray/dlpack.c | 9 ++++----- numpy/_core/tests/test_dlpack.py | 11 +++++++++++ 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py index 98a94973383a..d860aadedd83 100644 --- a/numpy/_core/_add_newdocs.py +++ b/numpy/_core/_add_newdocs.py @@ -1663,8 +1663,8 @@ from_dlpack(x, /, *, device=None, copy=None) Create a NumPy array from an object implementing the ``__dlpack__`` - protocol. Generally, the returned NumPy array is a read-only view - of the input object. See [1]_ and [2]_ for more details. + protocol. Generally, the returned NumPy array is a view of the input + object. See [1]_ and [2]_ for more details. Parameters ---------- diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c index 14fbc36c3bff..4bea7f9fc1ab 100644 --- a/numpy/_core/src/multiarray/dlpack.c +++ b/numpy/_core/src/multiarray/dlpack.c @@ -601,7 +601,7 @@ from_dlpack(PyObject *NPY_UNUSED(self), return NULL; } dl_tensor = managed->dl_tensor; - readonly = 0; + readonly = 1; } const int ndim = dl_tensor.ndim; @@ -702,14 +702,13 @@ from_dlpack(PyObject *NPY_UNUSED(self), } PyObject *ret = PyArray_NewFromDescr(&PyArray_Type, descr, ndim, shape, - dl_tensor.strides != NULL ? strides : NULL, data, 0, NULL); + dl_tensor.strides != NULL ? strides : NULL, data, readonly ? 
0 : + NPY_ARRAY_WRITEABLE, NULL); + if (ret == NULL) { Py_DECREF(capsule); return NULL; } - if (readonly) { - PyArray_CLEARFLAGS((PyArrayObject *)ret, NPY_ARRAY_WRITEABLE); - } PyObject *new_capsule; if (versioned) { diff --git a/numpy/_core/tests/test_dlpack.py b/numpy/_core/tests/test_dlpack.py index d9205912124e..41dd72429580 100644 --- a/numpy/_core/tests/test_dlpack.py +++ b/numpy/_core/tests/test_dlpack.py @@ -144,6 +144,17 @@ def test_readonly(self): y = np.from_dlpack(x) assert not y.flags.writeable + def test_writeable(self): + x_new, x_old = new_and_old_dlpack() + + # new dlpacks respect writeability + y = np.from_dlpack(x_new) + assert y.flags.writeable + + # old dlpacks are not writeable for backwards compatibility + y = np.from_dlpack(x_old) + assert not y.flags.writeable + def test_ndim0(self): x = np.array(1.0) y = np.from_dlpack(x) From bba5a9001acec5d306891ba97b5497654beda371 Mon Sep 17 00:00:00 2001 From: Nicholas Christensen <11543181+nchristensen@users.noreply.github.com> Date: Wed, 2 Apr 2025 14:12:16 -0500 Subject: [PATCH 158/187] BUG: Fix crackfortran parsing error when a division occurs within a common block (#28396) * BUG: Fix crackpython parsing error when a division occurs within a common block * Add test for more complicated array sizing expression * fix typos * Simplify tests to currently supported syntax * Use regular expression to find common block name * Revert from broken regular expression version to prior version, add comment * Add space before inline comment * Use regular expression to split line * Add missing white space to appease linter * More linting fixes * Pass maxsplit as a keyword argument --- numpy/f2py/crackfortran.py | 21 ++----------------- .../src/crackfortran/common_with_division.f | 17 +++++++++++++++ numpy/f2py/tests/test_crackfortran.py | 6 +++++- 3 files changed, 24 insertions(+), 20 deletions(-) create mode 100644 numpy/f2py/tests/src/crackfortran/common_with_division.f diff --git a/numpy/f2py/crackfortran.py 
b/numpy/f2py/crackfortran.py index 94cb64abe035..3ea1888df113 100644 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -1489,26 +1489,9 @@ def analyzeline(m, case, line): line = m.group('after').strip() if not line[0] == '/': line = '//' + line + cl = [] - f = 0 - bn = '' - ol = '' - for c in line: - if c == '/': - f = f + 1 - continue - if f >= 3: - bn = bn.strip() - if not bn: - bn = '_BLNK_' - cl.append([bn, ol]) - f = f - 2 - bn = '' - ol = '' - if f % 2: - bn = bn + c - else: - ol = ol + c + [_, bn, ol] = re.split('/', line, maxsplit=2) bn = bn.strip() if not bn: bn = '_BLNK_' diff --git a/numpy/f2py/tests/src/crackfortran/common_with_division.f b/numpy/f2py/tests/src/crackfortran/common_with_division.f new file mode 100644 index 000000000000..4aa12cf6dcee --- /dev/null +++ b/numpy/f2py/tests/src/crackfortran/common_with_division.f @@ -0,0 +1,17 @@ + subroutine common_with_division + integer lmu,lb,lub,lpmin + parameter (lmu=1) + parameter (lb=20) +c crackfortran fails to parse this +c parameter (lub=(lb-1)*lmu+1) +c crackfortran can successfully parse this though + parameter (lub=lb*lmu-lmu+1) + parameter (lpmin=2) + +c crackfortran fails to parse this correctly +c common /mortmp/ ctmp((lub*(lub+1)*(lub+1))/lpmin+1) + + common /mortmp/ ctmp(lub/lpmin+1) + + return + end diff --git a/numpy/f2py/tests/test_crackfortran.py b/numpy/f2py/tests/test_crackfortran.py index ed3588c25475..965a6b0f87e8 100644 --- a/numpy/f2py/tests/test_crackfortran.py +++ b/numpy/f2py/tests/test_crackfortran.py @@ -114,12 +114,16 @@ def incr(x): class TestCrackFortran(util.F2PyTest): # gh-2848: commented lines between parameters in subroutine parameter lists - sources = [util.getpath("tests", "src", "crackfortran", "gh2848.f90")] + sources = [util.getpath("tests", "src", "crackfortran", "gh2848.f90"), + util.getpath("tests", "src", "crackfortran", "common_with_division.f") + ] def test_gh2848(self): r = self.module.gh2848(1, 2) assert r == (1, 2) + def 
test_common_with_division(self): + assert len(self.module.mortmp.ctmp) == 11 class TestMarkinnerspaces: # gh-14118: markinnerspaces does not handle multiple quotations From 0a7f8190512f8ce640c1818f7954539482c5d4ca Mon Sep 17 00:00:00 2001 From: jorenham Date: Fri, 4 Apr 2025 03:36:43 +0200 Subject: [PATCH 159/187] TYP: fix `ndarray.tolist()` and `.item()` for unknown dtype --- numpy/__init__.pyi | 55 +++++-------------- .../tests/data/reveal/ndarray_conversion.pyi | 11 +++- 2 files changed, 23 insertions(+), 43 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index e5c440bc8a79..8e3447ee4d7c 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -1093,35 +1093,11 @@ class _SupportsItem(Protocol[_T_co]): class _SupportsDLPack(Protocol[_T_contra]): def __dlpack__(self, /, *, stream: _T_contra | None = None) -> CapsuleType: ... -@type_check_only -class _HasShape(Protocol[_ShapeT_co]): - @property - def shape(self, /) -> _ShapeT_co: ... - @type_check_only class _HasDType(Protocol[_T_co]): @property def dtype(self, /) -> _T_co: ... -@type_check_only -class _HasShapeAndSupportsItem(_HasShape[_ShapeT_co], _SupportsItem[_T_co], Protocol[_ShapeT_co, _T_co]): ... - -# matches any `x` on `x.type.item() -> _T_co`, e.g. `dtype[np.int8]` gives `_T_co: int` -@type_check_only -class _HasTypeWithItem(Protocol[_T_co]): - @property - def type(self, /) -> type[_SupportsItem[_T_co]]: ... - -# matches any `x` on `x.shape: _ShapeT_co` and `x.dtype.type.item() -> _T_co`, -# useful for capturing the item-type (`_T_co`) of the scalar-type of an array with -# specific shape (`_ShapeT_co`). -@type_check_only -class _HasShapeAndDTypeWithItem(Protocol[_ShapeT_co, _T_co]): - @property - def shape(self, /) -> _ShapeT_co: ... - @property - def dtype(self, /) -> _HasTypeWithItem[_T_co]: ... 
- @type_check_only class _HasRealAndImag(Protocol[_RealT_co, _ImagT_co]): @property @@ -2204,29 +2180,26 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @property def flat(self) -> flatiter[Self]: ... - @overload # special casing for `StringDType`, which has no scalar type - def item(self: ndarray[Any, dtypes.StringDType], /) -> str: ... - @overload - def item(self: ndarray[Any, dtypes.StringDType], arg0: SupportsIndex | tuple[SupportsIndex, ...] = ..., /) -> str: ... - @overload - def item(self: ndarray[Any, dtypes.StringDType], /, *args: SupportsIndex) -> str: ... @overload # use the same output type as that of the underlying `generic` - def item(self: _HasShapeAndDTypeWithItem[Any, _T], /) -> _T: ... - @overload - def item(self: _HasShapeAndDTypeWithItem[Any, _T], arg0: SupportsIndex | tuple[SupportsIndex, ...] = ..., /) -> _T: ... - @overload - def item(self: _HasShapeAndDTypeWithItem[Any, _T], /, *args: SupportsIndex) -> _T: ... + def item(self: NDArray[generic[_T]], i0: SupportsIndex | tuple[SupportsIndex, ...] = ..., /, *args: SupportsIndex) -> _T: ... + @overload # special casing for `StringDType`, which has no scalar type + def item( + self: ndarray[Any, dtypes.StringDType], + arg0: SupportsIndex | tuple[SupportsIndex, ...] = ..., + /, + *args: SupportsIndex, + ) -> str: ... @overload - def tolist(self: _HasShapeAndSupportsItem[tuple[()], _T], /) -> _T: ... + def tolist(self: ndarray[tuple[()], dtype[generic[_T]]], /) -> _T: ... @overload - def tolist(self: _HasShapeAndSupportsItem[tuple[int], _T], /) -> list[_T]: ... + def tolist(self: ndarray[tuple[int], dtype[generic[_T]]], /) -> list[_T]: ... @overload - def tolist(self: _HasShapeAndSupportsItem[tuple[int, int], _T], /) -> list[list[_T]]: ... + def tolist(self: ndarray[tuple[int, int], dtype[generic[_T]]], /) -> list[list[_T]]: ... @overload - def tolist(self: _HasShapeAndSupportsItem[tuple[int, int, int], _T], /) -> list[list[list[_T]]]: ... 
+ def tolist(self: ndarray[tuple[int, int, int], dtype[generic[_T]]], /) -> list[list[list[_T]]]: ... @overload - def tolist(self: _HasShapeAndSupportsItem[Any, _T], /) -> _T | list[_T] | list[list[_T]] | list[list[list[Any]]]: ... + def tolist(self, /) -> Any: ... @overload def resize(self, new_shape: _ShapeLike, /, *, refcheck: builtins.bool = ...) -> None: ... @@ -5379,7 +5352,7 @@ class matrix(ndarray[_2DShapeT_co, _DType_co]): def ptp(self, axis: None | _ShapeLike = ..., out: _ArrayT = ...) -> _ArrayT: ... def squeeze(self, axis: None | _ShapeLike = ...) -> matrix[_2D, _DType_co]: ... - def tolist(self: _SupportsItem[_T]) -> list[list[_T]]: ... + def tolist(self: matrix[Any, dtype[generic[_T]]]) -> list[list[_T]]: ... # pyright: ignore[reportIncompatibleMethodOverride] def ravel(self, /, order: _OrderKACF = "C") -> matrix[tuple[L[1], int], _DType_co]: ... # pyright: ignore[reportIncompatibleMethodOverride] def flatten(self, /, order: _OrderKACF = "C") -> matrix[tuple[L[1], int], _DType_co]: ... 
# pyright: ignore[reportIncompatibleMethodOverride] diff --git a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi index b6909e64f780..49181d2c98a6 100644 --- a/numpy/typing/tests/data/reveal/ndarray_conversion.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi @@ -30,8 +30,15 @@ assert_type(b1_0d.tolist(), bool) assert_type(u2_1d.tolist(), list[int]) assert_type(i4_2d.tolist(), list[list[int]]) assert_type(f8_3d.tolist(), list[list[list[float]]]) -assert_type(cG_4d.tolist(), complex | list[complex] | list[list[complex]] | list[list[list[Any]]]) -assert_type(i0_nd.tolist(), int | list[int] | list[list[int]] | list[list[list[Any]]]) +assert_type(cG_4d.tolist(), Any) +assert_type(i0_nd.tolist(), Any) + +# regression tests for numpy/numpy#27944 +any_dtype: np.ndarray[Any, Any] +any_sctype: np.ndarray[Any, Any] +assert_type(any_dtype.tolist(), Any) +assert_type(any_sctype.tolist(), Any) + # itemset does not return a value # tostring is pretty simple From 3c831617b5778038613c9f9d1672948e534815f9 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 4 Apr 2025 01:35:06 -0600 Subject: [PATCH 160/187] BUG: fix deepcopying StringDType arrays (#28643) Fixes #28609 --- numpy/_core/src/multiarray/methods.c | 2 +- numpy/_core/tests/test_stringdtype.py | 22 +++++++++++++++------- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index 7f5bd29809a3..926efa54900f 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -1605,7 +1605,7 @@ _deepcopy_call(char *iptr, char *optr, PyArray_Descr *dtype, } } } - else { + else if (PyDataType_ISOBJECT(dtype)) { PyObject *itemp, *otemp; PyObject *res; memcpy(&itemp, iptr, sizeof(itemp)); diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index 29b52b27afe8..45d8088156c9 100644 --- 
a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -1,8 +1,9 @@ import concurrent.futures +import copy import itertools import os -import pickle import string +import pickle import sys import tempfile @@ -405,6 +406,13 @@ def test_pickle(dtype, string_list): os.remove(f.name) +def test_stdlib_copy(dtype, string_list): + arr = np.array(string_list, dtype=dtype) + + assert_array_equal(copy.copy(arr), arr) + assert_array_equal(copy.deepcopy(arr), arr) + + @pytest.mark.parametrize( "strings", [ @@ -1728,12 +1736,12 @@ def test_zeros(self): assert_array_equal(z, "") def test_copy(self): - c = self.a.copy() - assert_array_equal(self.get_flags(c), self.get_flags(self.a)) - assert_array_equal(c, self.a) - offsets = self.get_view(c)['offset'] - assert offsets[2] == 1 - assert offsets[3] == 1 + len(self.s_medium) + self.sizeofstr // 2 + for c in [self.a.copy(), copy.copy(self.a), copy.deepcopy(self.a)]: + assert_array_equal(self.get_flags(c), self.get_flags(self.a)) + assert_array_equal(c, self.a) + offsets = self.get_view(c)['offset'] + assert offsets[2] == 1 + assert offsets[3] == 1 + len(self.s_medium) + self.sizeofstr // 2 def test_arena_use_with_setting(self): c = np.zeros_like(self.a) From ad79af13b080e56d439f0909d4b5570e5f2aea59 Mon Sep 17 00:00:00 2001 From: jorenham Date: Sun, 6 Apr 2025 17:29:56 +0200 Subject: [PATCH 161/187] TYP: Accept objects that ``write()`` to ``str`` in ``savetxt`` --- numpy/lib/_npyio_impl.pyi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index 19257a802d44..16d009524875 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -35,7 +35,7 @@ _SCT_co = TypeVar("_SCT_co", bound=np.generic, default=Any, covariant=True) _FName: TypeAlias = StrPath | Iterable[str] | Iterable[bytes] _FNameRead: TypeAlias = StrPath | SupportsRead[str] | SupportsRead[bytes] _FNameWriteBytes: TypeAlias = StrPath | SupportsWrite[bytes] 
-_FNameWrite: TypeAlias = _FNameWriteBytes | SupportsWrite[bytes] +_FNameWrite: TypeAlias = _FNameWriteBytes | SupportsWrite[str] @type_check_only class _SupportsReadSeek(SupportsRead[_T_co], Protocol[_T_co]): @@ -160,7 +160,7 @@ def loadtxt( ) -> NDArray[Any]: ... def savetxt( - fname: StrPath | _FNameWrite, + fname: _FNameWrite, X: ArrayLike, fmt: str | Sequence[str] = "%.18e", delimiter: str = " ", From 8f4eef72d3ff042a182700cbdfedfca6ce35fd4e Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Sun, 6 Apr 2025 20:00:24 +0200 Subject: [PATCH 162/187] CI: Replace QEMU armhf with native (32-bit compatibility mode) (#28653) * CI: Tests NumPy on 32-bit ARM hard-float (armhf) via compatibility mode * BUG, SIMD: Fix floating-point errors with positive infinity input in sqrt on armhf Guards against passing positive infinity to vrsqrteq_f32 in sqrt operation, which would raise invalid floating-point errors on ARMv7 architectures. * TEST: Mark linspace subnormal test as xfail on ARM32 platforms Adds an xfail marker to the linspace subnormal test case for ARMv7 and AArch32 platforms. These platforms seem to flush subnormals to zero (FTZ) even when not explicitly enabled via the FPSCR register, causing the test to fail. * BUG, SIMD: Fix ARMv8 feature detection in 32-bit mode Fix detection of `FPHP`, `ASIMDHP`, `ASIMDDP`, `ASIMDFHM` features on ARMv8 32-bit mode (aarch32). Fix memory leaks in CPU feature detection on Android by adding missing free() calls. * CI: Remove QEMU-based armhf testing Remove QEMU-based armhf testing as we now use native 32-bit compatibility mode running on ARM64 GitHub runners in a separate implementation. 
--- .github/workflows/linux.yml | 45 ++++++++ .github/workflows/linux_qemu.yml | 11 -- numpy/_core/src/common/npy_cpu_features.c | 41 ++++--- numpy/_core/src/common/npy_cpuinfo_parser.h | 113 ++++++++++++-------- numpy/_core/src/common/simd/neon/math.h | 12 +-- numpy/_core/tests/test_cpu_features.py | 15 +-- numpy/_core/tests/test_function_base.py | 8 +- 7 files changed, 156 insertions(+), 89 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index b4826f2e1642..57e5cf53f225 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -155,6 +155,51 @@ jobs: env: PYTHONOPTIMIZE: 2 + + armhf_test: + # Tests NumPy on 32-bit ARM hard-float (armhf) via compatibility mode + # running on aarch64 (ARM 64-bit) GitHub runners. + needs: [smoke_test] + if: github.repository == 'numpy/numpy' + runs-on: ubuntu-22.04-arm + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: recursive + fetch-tags: true + persist-credentials: false + + - name: Creates new container + run: | + docker run --name the_container --interactive \ + -v $(pwd):/numpy arm32v7/ubuntu:22.04 /bin/linux32 /bin/bash -c " + apt update && + apt install -y ninja-build cmake git python3 python-is-python3 python3-dev python3-pip python3-venv && + python -m pip install -r /numpy/requirements/build_requirements.txt && + python -m pip install -r /numpy/requirements/test_requirements.txt + " + docker commit the_container the_container + + - name: Meson Build + run: | + docker run --rm -e "TERM=xterm-256color" \ + -v $(pwd):/numpy the_container \ + /bin/script -e -q -c "/bin/linux32 /bin/bash --noprofile --norc -eo pipefail -c ' + cd /numpy && spin build + '" + + - name: Meson Log + if: always() + run: 'cat build/meson-logs/meson-log.txt' + + - name: Run Tests + run: | + docker run --rm -e "TERM=xterm-256color" \ + -v $(pwd):/numpy the_container \ + /bin/script -e -q -c "/bin/linux32 /bin/bash --noprofile --norc -eo 
pipefail -c ' + cd /numpy && spin test -m full -- --timeout=600 --durations=10 + '" + benchmark: needs: [smoke_test] runs-on: ubuntu-latest diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 15681f4c476f..de7f6f816cee 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -38,17 +38,6 @@ jobs: fail-fast: false matrix: BUILD_PROP: - - [ - "armhf", - "arm-linux-gnueabihf", - "arm32v7/ubuntu:22.04", - "-Dallow-noblas=true", - # test_unary_spurious_fpexception is currently skipped - # FIXME(@seiko2plus): Requires confirmation for the following issue: - # The presence of an FP invalid exception caused by sqrt. Unsure if this is a qemu bug or not. - "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_unary_spurious_fpexception", - "arm" - ] - [ "ppc64le", "powerpc64le-linux-gnu", diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index 7c0a4c60294c..2c1e064afda5 100644 --- a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -749,34 +749,33 @@ npy__cpu_init_features_linux(void) #endif } #ifdef __arm__ + npy__cpu_have[NPY_CPU_FEATURE_NEON] = (hwcap & NPY__HWCAP_NEON) != 0; + if (npy__cpu_have[NPY_CPU_FEATURE_NEON]) { + npy__cpu_have[NPY_CPU_FEATURE_NEON_FP16] = (hwcap & NPY__HWCAP_HALF) != 0; + npy__cpu_have[NPY_CPU_FEATURE_NEON_VFPV4] = (hwcap & NPY__HWCAP_VFPv4) != 0; + } // Detect Arm8 (aarch32 state) if ((hwcap2 & NPY__HWCAP2_AES) || (hwcap2 & NPY__HWCAP2_SHA1) || (hwcap2 & NPY__HWCAP2_SHA2) || (hwcap2 & NPY__HWCAP2_PMULL) || (hwcap2 & NPY__HWCAP2_CRC32)) { - hwcap = hwcap2; + npy__cpu_have[NPY_CPU_FEATURE_ASIMD] = npy__cpu_have[NPY_CPU_FEATURE_NEON]; + } #else - if (1) - { - if (!(hwcap & (NPY__HWCAP_FP | NPY__HWCAP_ASIMD))) { - // Is this could happen? 
maybe disabled by kernel - // BTW this will break the baseline of AARCH64 - return 1; - } -#endif - npy__cpu_have[NPY_CPU_FEATURE_FPHP] = (hwcap & NPY__HWCAP_FPHP) != 0; - npy__cpu_have[NPY_CPU_FEATURE_ASIMDHP] = (hwcap & NPY__HWCAP_ASIMDHP) != 0; - npy__cpu_have[NPY_CPU_FEATURE_ASIMDDP] = (hwcap & NPY__HWCAP_ASIMDDP) != 0; - npy__cpu_have[NPY_CPU_FEATURE_ASIMDFHM] = (hwcap & NPY__HWCAP_ASIMDFHM) != 0; - npy__cpu_have[NPY_CPU_FEATURE_SVE] = (hwcap & NPY__HWCAP_SVE) != 0; - npy__cpu_init_features_arm8(); - } else { - npy__cpu_have[NPY_CPU_FEATURE_NEON] = (hwcap & NPY__HWCAP_NEON) != 0; - if (npy__cpu_have[NPY_CPU_FEATURE_NEON]) { - npy__cpu_have[NPY_CPU_FEATURE_NEON_FP16] = (hwcap & NPY__HWCAP_HALF) != 0; - npy__cpu_have[NPY_CPU_FEATURE_NEON_VFPV4] = (hwcap & NPY__HWCAP_VFPv4) != 0; - } + if (!(hwcap & (NPY__HWCAP_FP | NPY__HWCAP_ASIMD))) { + // Is this could happen? maybe disabled by kernel + // BTW this will break the baseline of AARCH64 + return 1; } + npy__cpu_init_features_arm8(); +#endif + npy__cpu_have[NPY_CPU_FEATURE_FPHP] = (hwcap & NPY__HWCAP_FPHP) != 0; + npy__cpu_have[NPY_CPU_FEATURE_ASIMDHP] = (hwcap & NPY__HWCAP_ASIMDHP) != 0; + npy__cpu_have[NPY_CPU_FEATURE_ASIMDDP] = (hwcap & NPY__HWCAP_ASIMDDP) != 0; + npy__cpu_have[NPY_CPU_FEATURE_ASIMDFHM] = (hwcap & NPY__HWCAP_ASIMDFHM) != 0; +#ifndef __arm__ + npy__cpu_have[NPY_CPU_FEATURE_SVE] = (hwcap & NPY__HWCAP_SVE) != 0; +#endif return 1; } #endif diff --git a/numpy/_core/src/common/npy_cpuinfo_parser.h b/numpy/_core/src/common/npy_cpuinfo_parser.h index 154c4245ba2b..30f2976d28b6 100644 --- a/numpy/_core/src/common/npy_cpuinfo_parser.h +++ b/numpy/_core/src/common/npy_cpuinfo_parser.h @@ -36,25 +36,43 @@ #define NPY__HWCAP 16 #define NPY__HWCAP2 26 -// arch/arm/include/uapi/asm/hwcap.h -#define NPY__HWCAP_HALF (1 << 1) -#define NPY__HWCAP_NEON (1 << 12) -#define NPY__HWCAP_VFPv3 (1 << 13) -#define NPY__HWCAP_VFPv4 (1 << 16) -#define NPY__HWCAP2_AES (1 << 0) -#define NPY__HWCAP2_PMULL (1 << 1) -#define 
NPY__HWCAP2_SHA1 (1 << 2) -#define NPY__HWCAP2_SHA2 (1 << 3) -#define NPY__HWCAP2_CRC32 (1 << 4) -// arch/arm64/include/uapi/asm/hwcap.h -#define NPY__HWCAP_FP (1 << 0) -#define NPY__HWCAP_ASIMD (1 << 1) -#define NPY__HWCAP_FPHP (1 << 9) -#define NPY__HWCAP_ASIMDHP (1 << 10) -#define NPY__HWCAP_ASIMDDP (1 << 20) -#define NPY__HWCAP_SVE (1 << 22) -#define NPY__HWCAP_ASIMDFHM (1 << 23) -/* +#ifdef __arm__ + // arch/arm/include/uapi/asm/hwcap.h + #define NPY__HWCAP_HALF (1 << 1) + #define NPY__HWCAP_NEON (1 << 12) + #define NPY__HWCAP_VFPv3 (1 << 13) + #define NPY__HWCAP_VFPv4 (1 << 16) + + #define NPY__HWCAP_FPHP (1 << 22) + #define NPY__HWCAP_ASIMDHP (1 << 23) + #define NPY__HWCAP_ASIMDDP (1 << 24) + #define NPY__HWCAP_ASIMDFHM (1 << 25) + + #define NPY__HWCAP2_AES (1 << 0) + #define NPY__HWCAP2_PMULL (1 << 1) + #define NPY__HWCAP2_SHA1 (1 << 2) + #define NPY__HWCAP2_SHA2 (1 << 3) + #define NPY__HWCAP2_CRC32 (1 << 4) +#else + // arch/arm64/include/uapi/asm/hwcap.h + #define NPY__HWCAP_FP (1 << 0) + #define NPY__HWCAP_ASIMD (1 << 1) + + #define NPY__HWCAP_FPHP (1 << 9) + #define NPY__HWCAP_ASIMDHP (1 << 10) + #define NPY__HWCAP_ASIMDDP (1 << 20) + #define NPY__HWCAP_ASIMDFHM (1 << 23) + + #define NPY__HWCAP_AES (1 << 3) + #define NPY__HWCAP_PMULL (1 << 4) + #define NPY__HWCAP_SHA1 (1 << 5) + #define NPY__HWCAP_SHA2 (1 << 6) + #define NPY__HWCAP_CRC32 (1 << 7) + #define NPY__HWCAP_SVE (1 << 22) +#endif + + +/* * Get the size of a file by reading it until the end. This is needed * because files under /proc do not always return a valid size when * using fseek(0, SEEK_END) + ftell(). Nor can they be mmap()-ed. @@ -87,7 +105,7 @@ get_file_size(const char* pathname) return result; } -/* +/* * Read the content of /proc/cpuinfo into a user-provided buffer. * Return the length of the data, or -1 on error. Does *not* * zero-terminate the content. 
Will not read more @@ -123,7 +141,7 @@ read_file(const char* pathname, char* buffer, size_t buffsize) return count; } -/* +/* * Extract the content of a the first occurrence of a given field in * the content of /proc/cpuinfo and return it as a heap-allocated * string that must be freed by the caller. @@ -182,7 +200,7 @@ extract_cpuinfo_field(const char* buffer, int buflen, const char* field) return result; } -/* +/* * Checks that a space-separated list of items contains one given 'item'. * Returns 1 if found, 0 otherwise. */ @@ -220,44 +238,51 @@ has_list_item(const char* list, const char* item) return 0; } -static void setHwcap(char* cpuFeatures, unsigned long* hwcap) { - *hwcap |= has_list_item(cpuFeatures, "neon") ? NPY__HWCAP_NEON : 0; - *hwcap |= has_list_item(cpuFeatures, "half") ? NPY__HWCAP_HALF : 0; - *hwcap |= has_list_item(cpuFeatures, "vfpv3") ? NPY__HWCAP_VFPv3 : 0; - *hwcap |= has_list_item(cpuFeatures, "vfpv4") ? NPY__HWCAP_VFPv4 : 0; - - *hwcap |= has_list_item(cpuFeatures, "asimd") ? NPY__HWCAP_ASIMD : 0; - *hwcap |= has_list_item(cpuFeatures, "fp") ? NPY__HWCAP_FP : 0; - *hwcap |= has_list_item(cpuFeatures, "fphp") ? NPY__HWCAP_FPHP : 0; - *hwcap |= has_list_item(cpuFeatures, "asimdhp") ? NPY__HWCAP_ASIMDHP : 0; - *hwcap |= has_list_item(cpuFeatures, "asimddp") ? NPY__HWCAP_ASIMDDP : 0; - *hwcap |= has_list_item(cpuFeatures, "asimdfhm") ? 
NPY__HWCAP_ASIMDFHM : 0; -} - static int get_feature_from_proc_cpuinfo(unsigned long *hwcap, unsigned long *hwcap2) { - char* cpuinfo = NULL; - int cpuinfo_len; - cpuinfo_len = get_file_size("/proc/cpuinfo"); + *hwcap = 0; + *hwcap2 = 0; + + int cpuinfo_len = get_file_size("/proc/cpuinfo"); if (cpuinfo_len < 0) { return 0; } - cpuinfo = malloc(cpuinfo_len); + char *cpuinfo = malloc(cpuinfo_len); if (cpuinfo == NULL) { return 0; } + cpuinfo_len = read_file("/proc/cpuinfo", cpuinfo, cpuinfo_len); - char* cpuFeatures = extract_cpuinfo_field(cpuinfo, cpuinfo_len, "Features"); - if(cpuFeatures == NULL) { + char *cpuFeatures = extract_cpuinfo_field(cpuinfo, cpuinfo_len, "Features"); + if (cpuFeatures == NULL) { + free(cpuinfo); return 0; } - setHwcap(cpuFeatures, hwcap); - *hwcap2 |= *hwcap; + *hwcap |= has_list_item(cpuFeatures, "fphp") ? NPY__HWCAP_FPHP : 0; + *hwcap |= has_list_item(cpuFeatures, "asimdhp") ? NPY__HWCAP_ASIMDHP : 0; + *hwcap |= has_list_item(cpuFeatures, "asimddp") ? NPY__HWCAP_ASIMDDP : 0; + *hwcap |= has_list_item(cpuFeatures, "asimdfhm") ? NPY__HWCAP_ASIMDFHM : 0; +#ifdef __arm__ + *hwcap |= has_list_item(cpuFeatures, "neon") ? NPY__HWCAP_NEON : 0; + *hwcap |= has_list_item(cpuFeatures, "half") ? NPY__HWCAP_HALF : 0; + *hwcap |= has_list_item(cpuFeatures, "vfpv3") ? NPY__HWCAP_VFPv3 : 0; + *hwcap |= has_list_item(cpuFeatures, "vfpv4") ? NPY__HWCAP_VFPv4 : 0; *hwcap2 |= has_list_item(cpuFeatures, "aes") ? NPY__HWCAP2_AES : 0; *hwcap2 |= has_list_item(cpuFeatures, "pmull") ? NPY__HWCAP2_PMULL : 0; *hwcap2 |= has_list_item(cpuFeatures, "sha1") ? NPY__HWCAP2_SHA1 : 0; *hwcap2 |= has_list_item(cpuFeatures, "sha2") ? NPY__HWCAP2_SHA2 : 0; *hwcap2 |= has_list_item(cpuFeatures, "crc32") ? NPY__HWCAP2_CRC32 : 0; +#else + *hwcap |= has_list_item(cpuFeatures, "asimd") ? NPY__HWCAP_ASIMD : 0; + *hwcap |= has_list_item(cpuFeatures, "fp") ? NPY__HWCAP_FP : 0; + *hwcap |= has_list_item(cpuFeatures, "aes") ? 
NPY__HWCAP_AES : 0; + *hwcap |= has_list_item(cpuFeatures, "pmull") ? NPY__HWCAP_PMULL : 0; + *hwcap |= has_list_item(cpuFeatures, "sha1") ? NPY__HWCAP_SHA1 : 0; + *hwcap |= has_list_item(cpuFeatures, "sha2") ? NPY__HWCAP_SHA2 : 0; + *hwcap |= has_list_item(cpuFeatures, "crc32") ? NPY__HWCAP_CRC32 : 0; +#endif + free(cpuinfo); + free(cpuFeatures); return 1; } #endif /* NUMPY_CORE_SRC_COMMON_NPY_CPUINFO_PARSER_H_ */ diff --git a/numpy/_core/src/common/simd/neon/math.h b/numpy/_core/src/common/simd/neon/math.h index 58d14809fbfe..76c5b58be788 100644 --- a/numpy/_core/src/common/simd/neon/math.h +++ b/numpy/_core/src/common/simd/neon/math.h @@ -28,11 +28,13 @@ NPY_FINLINE npyv_f32 npyv_square_f32(npyv_f32 a) // Based on ARM doc, see https://developer.arm.com/documentation/dui0204/j/CIHDIACI NPY_FINLINE npyv_f32 npyv_sqrt_f32(npyv_f32 a) { + const npyv_f32 one = vdupq_n_f32(1.0f); const npyv_f32 zero = vdupq_n_f32(0.0f); const npyv_u32 pinf = vdupq_n_u32(0x7f800000); npyv_u32 is_zero = vceqq_f32(a, zero), is_inf = vceqq_u32(vreinterpretq_u32_f32(a), pinf); - // guard against floating-point division-by-zero error - npyv_f32 guard_byz = vbslq_f32(is_zero, vreinterpretq_f32_u32(pinf), a); + npyv_u32 is_special = vorrq_u32(is_zero, is_inf); + // guard against division-by-zero and infinity input to vrsqrte to avoid invalid fp error + npyv_f32 guard_byz = vbslq_f32(is_special, one, a); // estimate to (1/√a) npyv_f32 rsqrte = vrsqrteq_f32(guard_byz); /** @@ -47,10 +49,8 @@ NPY_FINLINE npyv_f32 npyv_square_f32(npyv_f32 a) rsqrte = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a, rsqrte), rsqrte), rsqrte); // a * (1/√a) npyv_f32 sqrt = vmulq_f32(a, rsqrte); - // return zero if the a is zero - // - return zero if a is zero. 
- // - return positive infinity if a is positive infinity - return vbslq_f32(vorrq_u32(is_zero, is_inf), a, sqrt); + // Handle special cases: return a for zeros and positive infinities + return vbslq_f32(is_special, a, sqrt); } #endif // NPY_SIMD_F64 diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py index 956f9630a0c5..570a2b893b06 100644 --- a/numpy/_core/tests/test_cpu_features.py +++ b/numpy/_core/tests/test_cpu_features.py @@ -401,12 +401,15 @@ class Test_ARM_Features(AbstractTest): def load_flags(self): self.load_flags_cpuinfo("Features") arch = self.get_cpuinfo_item("CPU architecture") - # in case of mounting virtual filesystem of aarch64 kernel - is_rootfs_v8 = int('0'+next(iter(arch))) > 7 if arch else 0 - if re.match("^(aarch64|AARCH64)", machine) or is_rootfs_v8: - self.features_map = dict( - NEON="ASIMD", HALF="ASIMD", VFPV4="ASIMD" - ) + # in case of mounting virtual filesystem of aarch64 kernel without linux32 + is_rootfs_v8 = ( + not re.match("^armv[0-9]+l$", machine) and + (int('0' + next(iter(arch))) > 7 if arch else 0) + ) + if re.match("^(aarch64|AARCH64)", machine) or is_rootfs_v8: + self.features_map = { + "NEON": "ASIMD", "HALF": "ASIMD", "VFPV4": "ASIMD" + } else: self.features_map = dict( # ELF auxiliary vector and /proc/cpuinfo on Linux kernel(armv8 aarch32) diff --git a/numpy/_core/tests/test_function_base.py b/numpy/_core/tests/test_function_base.py index 4f735b7ce359..b879f12ae8ea 100644 --- a/numpy/_core/tests/test_function_base.py +++ b/numpy/_core/tests/test_function_base.py @@ -1,5 +1,5 @@ import sys - +import platform import pytest import numpy as np @@ -14,6 +14,9 @@ IS_PYPY ) +def _is_armhf(): + # Check if the current platform is ARMHF (32-bit ARM architecture) + return platform.machine().startswith('arm') and platform.architecture()[0] == '32bit' class PhysicalQuantity(float): def __new__(cls, value): @@ -415,6 +418,9 @@ def __mul__(self, other): assert_equal(linspace(one, five), 
linspace(1, 5)) + # even when not explicitly enabled via FPSCR register + @pytest.mark.xfail(_is_armhf(), + reason="ARMHF/AArch32 platforms seem to FTZ subnormals") def test_denormal_numbers(self): # Regression test for gh-5437. Will probably fail when compiled # with ICC, which flushes denormals to zero From f8aa1239c01df440d393f02b3b2ae7f6a837ef09 Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Thu, 3 Apr 2025 20:51:21 +0200 Subject: [PATCH 163/187] SIMD: Resolve Highway QSort symbol linking error on aarch32/ASIMD The fix in numpy/meson#12 for ASIMD*(32-bit) compile-time feature detection revealed a new build error on aarch32 platforms: ImportError: /numpy/build-install/usr/lib/python3/dist-packages/numpy/_core/ _multiarray_umath.cpython-310-arm-linux-gnueabihf.so: undefined symbol: _ZN2np7highway10qsort_simd11QSort_ASIMDIjEEvPT_i This patch prevents platform detection constants of Highway from being exposed across translation units with different compiler flags (baseline). This approach eliminates detection mismatches that were causing symbol resolution failures in the Highway QSort implementation. 
--- .../src/npysort/highway_qsort.dispatch.cpp | 37 +++++++++++-------- numpy/_core/src/npysort/highway_qsort.hpp | 17 +-------- .../npysort/highway_qsort_16bit.dispatch.cpp | 37 +++++++++++-------- numpy/_core/src/npysort/quicksort.cpp | 4 +- 4 files changed, 46 insertions(+), 49 deletions(-) diff --git a/numpy/_core/src/npysort/highway_qsort.dispatch.cpp b/numpy/_core/src/npysort/highway_qsort.dispatch.cpp index 645055537d87..2893e817af08 100644 --- a/numpy/_core/src/npysort/highway_qsort.dispatch.cpp +++ b/numpy/_core/src/npysort/highway_qsort.dispatch.cpp @@ -1,22 +1,27 @@ +#define VQSORT_ONLY_STATIC 1 +#include "hwy/highway.h" +#include "hwy/contrib/sort/vqsort-inl.h" + #include "highway_qsort.hpp" +#include "quicksort.hpp" +namespace np::highway::qsort_simd { +template +void NPY_CPU_DISPATCH_CURFX(QSort)(T *arr, npy_intp size) +{ #if VQSORT_ENABLED + hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); +#else + sort::Quick(arr, size); +#endif +} -#define DISPATCH_VQSORT(TYPE) \ -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(TYPE *arr, intptr_t size) \ -{ \ - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); \ -} \ - -namespace np { namespace highway { namespace qsort_simd { - - DISPATCH_VQSORT(int32_t) - DISPATCH_VQSORT(uint32_t) - DISPATCH_VQSORT(int64_t) - DISPATCH_VQSORT(uint64_t) - DISPATCH_VQSORT(double) - DISPATCH_VQSORT(float) +template void NPY_CPU_DISPATCH_CURFX(QSort)(int32_t*, npy_intp); +template void NPY_CPU_DISPATCH_CURFX(QSort)(uint32_t*, npy_intp); +template void NPY_CPU_DISPATCH_CURFX(QSort)(int64_t*, npy_intp); +template void NPY_CPU_DISPATCH_CURFX(QSort)(uint64_t*, npy_intp); +template void NPY_CPU_DISPATCH_CURFX(QSort)(float*, npy_intp); +template void NPY_CPU_DISPATCH_CURFX(QSort)(double*, npy_intp); -} } } // np::highway::qsort_simd +} // np::highway::qsort_simd -#endif // VQSORT_ENABLED diff --git a/numpy/_core/src/npysort/highway_qsort.hpp b/numpy/_core/src/npysort/highway_qsort.hpp index 
77cd9f085943..b52e6da2b621 100644 --- a/numpy/_core/src/npysort/highway_qsort.hpp +++ b/numpy/_core/src/npysort/highway_qsort.hpp @@ -1,33 +1,20 @@ #ifndef NUMPY_SRC_COMMON_NPYSORT_HWY_SIMD_QSORT_HPP #define NUMPY_SRC_COMMON_NPYSORT_HWY_SIMD_QSORT_HPP -#define VQSORT_ONLY_STATIC 1 -#include "hwy/highway.h" -#include "hwy/contrib/sort/vqsort-inl.h" - #include "common.hpp" -#if !VQSORT_COMPILER_COMPATIBLE -#define NPY_DISABLE_HIGHWAY_SORT -#endif - -#ifndef NPY_DISABLE_HIGHWAY_SORT -namespace np { namespace highway { namespace qsort_simd { +namespace np::highway::qsort_simd { #ifndef NPY_DISABLE_OPTIMIZATION #include "highway_qsort.dispatch.h" #endif NPY_CPU_DISPATCH_DECLARE(template void QSort, (T *arr, npy_intp size)) -NPY_CPU_DISPATCH_DECLARE(template void QSelect, (T* arr, npy_intp num, npy_intp kth)) - #ifndef NPY_DISABLE_OPTIMIZATION #include "highway_qsort_16bit.dispatch.h" #endif NPY_CPU_DISPATCH_DECLARE(template void QSort, (T *arr, npy_intp size)) -NPY_CPU_DISPATCH_DECLARE(template void QSelect, (T* arr, npy_intp num, npy_intp kth)) -} } } // np::highway::qsort_simd +} // np::highway::qsort_simd #endif // NUMPY_SRC_COMMON_NPYSORT_HWY_SIMD_QSORT_HPP -#endif // NPY_DISABLE_HIGHWAY_SORT diff --git a/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp b/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp index d151de2b5e62..a7466709654d 100644 --- a/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp +++ b/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp @@ -1,28 +1,33 @@ -#include "highway_qsort.hpp" +#define VQSORT_ONLY_STATIC 1 +#include "hwy/highway.h" +#include "hwy/contrib/sort/vqsort-inl.h" +#include "highway_qsort.hpp" #include "quicksort.hpp" -#if VQSORT_ENABLED - -namespace np { namespace highway { namespace qsort_simd { - -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(Half *arr, intptr_t size) +namespace np::highway::qsort_simd { +template +void NPY_CPU_DISPATCH_CURFX(QSort)(T *arr, npy_intp size) { -#if HWY_HAVE_FLOAT16 - 
hwy::HWY_NAMESPACE::VQSortStatic(reinterpret_cast(arr), size, hwy::SortAscending()); +#if VQSORT_ENABLED + using THwy = std::conditional_t, hwy::float16_t, T>; + hwy::HWY_NAMESPACE::VQSortStatic(reinterpret_cast(arr), size, hwy::SortAscending()); #else sort::Quick(arr, size); #endif } -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(uint16_t *arr, intptr_t size) -{ - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(int16_t *arr, intptr_t size) +#if !HWY_HAVE_FLOAT16 +template <> +void NPY_CPU_DISPATCH_CURFX(QSort)(Half *arr, npy_intp size) { - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); + sort::Quick(arr, size); } +#endif // !HWY_HAVE_FLOAT16 -} } } // np::highway::qsort_simd +template void NPY_CPU_DISPATCH_CURFX(QSort)(int16_t*, npy_intp); +template void NPY_CPU_DISPATCH_CURFX(QSort)(uint16_t*, npy_intp); +#if HWY_HAVE_FLOAT16 +template void NPY_CPU_DISPATCH_CURFX(QSort)(Half*, npy_intp); +#endif -#endif // VQSORT_ENABLED +} // np::highway::qsort_simd diff --git a/numpy/_core/src/npysort/quicksort.cpp b/numpy/_core/src/npysort/quicksort.cpp index aca748056f39..15e5668f599d 100644 --- a/numpy/_core/src/npysort/quicksort.cpp +++ b/numpy/_core/src/npysort/quicksort.cpp @@ -84,7 +84,7 @@ inline bool quicksort_dispatch(T *start, npy_intp num) #if defined(NPY_CPU_AMD64) || defined(NPY_CPU_X86) // x86 32-bit and 64-bit #include "x86_simd_qsort_16bit.dispatch.h" NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::qsort_simd::template QSort, ); - #elif !defined(NPY_DISABLE_HIGHWAY_SORT) + #else #include "highway_qsort_16bit.dispatch.h" NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::highway::qsort_simd::template QSort, ); #endif @@ -95,7 +95,7 @@ inline bool quicksort_dispatch(T *start, npy_intp num) #if defined(NPY_CPU_AMD64) || defined(NPY_CPU_X86) // x86 32-bit and 64-bit #include "x86_simd_qsort.dispatch.h" NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::qsort_simd::template QSort, ); - #elif 
!defined(NPY_DISABLE_HIGHWAY_SORT) + #else #include "highway_qsort.dispatch.h" NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::highway::qsort_simd::template QSort, ); #endif From affe6be732b554f6be67b33fe19f29872538aa98 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 9 Apr 2025 17:42:55 +0200 Subject: [PATCH 164/187] TYP: add missing `"b1"` literals for `dtype[bool]` --- numpy/_typing/_char_codes.py | 6 +++++- numpy/typing/tests/data/reveal/dtype.pyi | 6 ++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/numpy/_typing/_char_codes.py b/numpy/_typing/_char_codes.py index a14c01a513ba..56154c7af383 100644 --- a/numpy/_typing/_char_codes.py +++ b/numpy/_typing/_char_codes.py @@ -1,6 +1,10 @@ from typing import Literal -_BoolCodes = Literal["bool", "bool_", "?", "|?", "=?", "?"] +_BoolCodes = Literal[ + "bool", "bool_", + "?", "|?", "=?", "?", + "b1", "|b1", "=b1", "b1", +] # fmt: skip _UInt8Codes = Literal["uint8", "u1", "|u1", "=u1", "u1"] _UInt16Codes = Literal["uint16", "u2", "|u2", "=u2", "u2"] diff --git a/numpy/typing/tests/data/reveal/dtype.pyi b/numpy/typing/tests/data/reveal/dtype.pyi index 4cd6d4a11aff..da37778b177b 100644 --- a/numpy/typing/tests/data/reveal/dtype.pyi +++ b/numpy/typing/tests/data/reveal/dtype.pyi @@ -7,7 +7,7 @@ from typing import Any, Literal, TypeAlias import numpy as np from numpy.dtypes import StringDType -from typing_extensions import assert_type +from typing_extensions import LiteralString, assert_type # a combination of likely `object` dtype-like candidates (no `_co`) _PyObjectLike: TypeAlias = Decimal | Fraction | dt.datetime | dt.timedelta @@ -71,6 +71,8 @@ assert_type(np.dtype(Decimal), np.dtype[np.object_]) assert_type(np.dtype(Fraction), np.dtype[np.object_]) # char-codes +assert_type(np.dtype("?"), np.dtype[np.bool]) +assert_type(np.dtype("|b1"), np.dtype[np.bool]) assert_type(np.dtype("u1"), np.dtype[np.uint8]) assert_type(np.dtype("l"), np.dtype[np.long]) assert_type(np.dtype("longlong"), np.dtype[np.longlong]) 
@@ -113,7 +115,7 @@ assert_type(dtype_U.base, np.dtype[Any]) assert_type(dtype_U.subdtype, None | tuple[np.dtype[Any], tuple[int, ...]]) assert_type(dtype_U.newbyteorder(), np.dtype[np.str_]) assert_type(dtype_U.type, type[np.str_]) -assert_type(dtype_U.name, str) +assert_type(dtype_U.name, LiteralString) assert_type(dtype_U.names, None | tuple[str, ...]) assert_type(dtype_U * 0, np.dtype[np.str_]) From 063a9cdb77639f04d91b9ec8b3da081a4b41976f Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 14 Apr 2025 05:20:19 +0200 Subject: [PATCH 165/187] TYP: Fix false rejection of ``NDArray[object_].__abs__()`` --- numpy/__init__.pyi | 1 + numpy/typing/tests/data/reveal/arithmetic.pyi | 1 + 2 files changed, 2 insertions(+) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 8e3447ee4d7c..b78e870a0dc9 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3960,6 +3960,7 @@ class object_(_RealMixin, generic[Any]): def __new__(cls, value: Any = ..., /) -> object | NDArray[Self]: ... # type: ignore[misc] def __init__(self, value: object = ..., /) -> None: ... def __hash__(self, /) -> int: ... + def __abs__(self, /) -> object_: ... # this affects NDArray[object_].__abs__ def __call__(self, /, *args: object, **kwargs: object) -> Any: ... 
if sys.version_info >= (3, 12): diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index 46ac003508c4..cc21a99006cd 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -308,6 +308,7 @@ assert_type(abs(m8_none), np.timedelta64[None]) assert_type(abs(m8_int), np.timedelta64[int]) assert_type(abs(m8_delta), np.timedelta64[dt.timedelta]) assert_type(abs(b_), np.bool) +assert_type(abs(AR_O), npt.NDArray[np.object_]) # Time structures From bd89621c8b073ec5aca9a4b677a1cdea364e1039 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 14 Apr 2025 15:25:51 +0200 Subject: [PATCH 166/187] TYP: Fix inconsistent ``NDArray[float64].__[r]truediv__`` return type with ``float`` --- numpy/__init__.pyi | 4 +- numpy/typing/tests/data/reveal/arithmetic.pyi | 86 +++++++++++++++++++ 2 files changed, 88 insertions(+), 2 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 8e3447ee4d7c..28c37a36704e 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2994,7 +2994,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __truediv__(self: _ArrayInt_co, other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __truediv__(self: _ArrayInt_co | NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __truediv__(self: _ArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload @@ -3025,7 +3025,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rtruediv__(self: _ArrayInt_co, other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... 
+ def __rtruediv__(self: _ArrayInt_co | NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __rtruediv__(self: _ArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index 46ac003508c4..a55ecb2cbd5d 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -185,6 +185,92 @@ assert_type(AR_LIKE_m - AR_O, Any) assert_type(AR_LIKE_M - AR_O, Any) assert_type(AR_LIKE_O - AR_O, Any) +# Array "true" division + +assert_type(AR_f / b, npt.NDArray[np.float64]) +assert_type(AR_f / i, npt.NDArray[np.float64]) +assert_type(AR_f / f, npt.NDArray[np.float64]) + +assert_type(b / AR_f, npt.NDArray[np.float64]) +assert_type(i / AR_f, npt.NDArray[np.float64]) +assert_type(f / AR_f, npt.NDArray[np.float64]) + +assert_type(AR_b / AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_b / AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_b / AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_b / AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_b / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_b, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u / AR_b, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i / AR_b, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f / AR_b, npt.NDArray[np.float64]) +assert_type(AR_LIKE_O / AR_b, Any) + +assert_type(AR_u / AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_u / AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_u / AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_u / AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_u / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_u, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u / AR_u, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i / AR_u, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f / AR_u, npt.NDArray[np.float64]) +assert_type(AR_LIKE_m / 
AR_u, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O / AR_u, Any) + +assert_type(AR_i / AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_i / AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_i / AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_i / AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_i / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_i, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u / AR_i, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i / AR_i, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f / AR_i, npt.NDArray[np.float64]) +assert_type(AR_LIKE_m / AR_i, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O / AR_i, Any) + +assert_type(AR_f / AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_f / AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_f / AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_f / AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_f / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u / AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i / AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f / AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_m / AR_f, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O / AR_f, Any) + +assert_type(AR_m / AR_LIKE_u, npt.NDArray[np.timedelta64]) +assert_type(AR_m / AR_LIKE_i, npt.NDArray[np.timedelta64]) +assert_type(AR_m / AR_LIKE_f, npt.NDArray[np.timedelta64]) +assert_type(AR_m / AR_LIKE_m, npt.NDArray[np.float64]) +assert_type(AR_m / AR_LIKE_O, Any) + +assert_type(AR_LIKE_m / AR_m, npt.NDArray[np.float64]) +assert_type(AR_LIKE_O / AR_m, Any) + +assert_type(AR_O / AR_LIKE_b, Any) +assert_type(AR_O / AR_LIKE_u, Any) +assert_type(AR_O / AR_LIKE_i, Any) +assert_type(AR_O / AR_LIKE_f, Any) +assert_type(AR_O / AR_LIKE_m, Any) +assert_type(AR_O / AR_LIKE_M, Any) +assert_type(AR_O / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_O, Any) +assert_type(AR_LIKE_u / AR_O, Any) +assert_type(AR_LIKE_i / AR_O, Any) +assert_type(AR_LIKE_f / 
AR_O, Any) +assert_type(AR_LIKE_m / AR_O, Any) +assert_type(AR_LIKE_M / AR_O, Any) +assert_type(AR_LIKE_O / AR_O, Any) + # Array floor division assert_type(AR_b // AR_LIKE_b, npt.NDArray[np.int8]) From 48fe43208fcc236432d2723eae46ad49cde9abd5 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 15 Apr 2025 18:14:18 +0200 Subject: [PATCH 167/187] TYP: fix string-like ``ndarray`` rich comparison operators --- numpy/__init__.pyi | 63 +++++++++++++-------- numpy/typing/tests/data/pass/comparisons.py | 16 +++++- 2 files changed, 55 insertions(+), 24 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index e430a5f8869b..afc93ed40d2b 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -22,7 +22,6 @@ from numpy._typing import ( NDArray, _SupportsArray, _NestedSequence, - _FiniteNestedSequence, _ArrayLike, _ArrayLikeBool_co, _ArrayLikeUInt_co, @@ -33,20 +32,20 @@ from numpy._typing import ( _ArrayLikeComplex128_co, _ArrayLikeComplex_co, _ArrayLikeNumber_co, + _ArrayLikeObject_co, + _ArrayLikeBytes_co, + _ArrayLikeStr_co, + _ArrayLikeString_co, _ArrayLikeTD64_co, _ArrayLikeDT64_co, - _ArrayLikeObject_co, - # DTypes DTypeLike, _DTypeLike, _DTypeLikeVoid, _VoidDTypeLike, - # Shapes _Shape, _ShapeLike, - # Scalars _CharLike_co, _IntLike_co, @@ -54,7 +53,6 @@ from numpy._typing import ( _TD64Like_co, _NumberLike_co, _ScalarLike_co, - # `number` precision NBitBase, # NOTE: Do not remove the extended precision bit-types even if seemingly unused; @@ -77,7 +75,6 @@ from numpy._typing import ( _NBitSingle, _NBitDouble, _NBitLongDouble, - # Character codes _BoolCodes, _UInt8Codes, @@ -119,7 +116,6 @@ from numpy._typing import ( _VoidCodes, _ObjectCodes, _StringCodes, - _UnsignedIntegerCodes, _SignedIntegerCodes, _IntegerCodes, @@ -130,7 +126,6 @@ from numpy._typing import ( _CharacterCodes, _FlexibleCodes, _GenericCodes, - # Ufuncs _UFunc_Nin1_Nout1, _UFunc_Nin2_Nout1, @@ -2552,12 +2547,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): 
@overload # ?-d def __iter__(self, /) -> Iterator[Any]: ... - # The last overload is for catching recursive objects whose - # nesting is too deep. - # The first overload is for catching `bytes` (as they are a subtype of - # `Sequence[int]`) and `str`. As `str` is a recursive sequence of - # strings, it will pass through the final overload otherwise - + # @overload def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... @overload @@ -2565,10 +2555,17 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... @overload - def __lt__(self: NDArray[object_], other: Any, /) -> NDArray[np.bool]: ... + def __lt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... @overload - def __lt__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + def __lt__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / + ) -> NDArray[np.bool]: ... + @overload + def __lt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + @overload + def __lt__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + # @overload def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... @overload @@ -2576,10 +2573,17 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... @overload - def __le__(self: NDArray[object_], other: Any, /) -> NDArray[np.bool]: ... + def __le__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... @overload - def __le__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + def __le__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / + ) -> NDArray[np.bool]: ... 
+ @overload + def __le__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + @overload + def __le__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + # @overload def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... @overload @@ -2587,10 +2591,17 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... @overload - def __gt__(self: NDArray[object_], other: Any, /) -> NDArray[np.bool]: ... + def __gt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... @overload - def __gt__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + def __gt__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / + ) -> NDArray[np.bool]: ... + @overload + def __gt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + @overload + def __gt__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + # @overload def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... @overload @@ -2598,9 +2609,15 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... @overload - def __ge__(self: NDArray[object_], other: Any, /) -> NDArray[np.bool]: ... + def __ge__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + @overload + def __ge__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / + ) -> NDArray[np.bool]: ... + @overload + def __ge__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... @overload - def __ge__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... 
+ def __ge__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... # Unary ops diff --git a/numpy/typing/tests/data/pass/comparisons.py b/numpy/typing/tests/data/pass/comparisons.py index 0babc321b32d..a461d8b660da 100644 --- a/numpy/typing/tests/data/pass/comparisons.py +++ b/numpy/typing/tests/data/pass/comparisons.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Any +from typing import cast, Any import numpy as np c16 = np.complex128() @@ -30,6 +30,9 @@ AR_i: np.ndarray[Any, np.dtype[np.int_]] = np.array([1]) AR_f: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.0]) AR_c: np.ndarray[Any, np.dtype[np.complex128]] = np.array([1.0j]) +AR_S: np.ndarray[Any, np.dtype[np.bytes_]] = np.array([b"a"], "S") +AR_T = cast(np.ndarray[Any, np.dtypes.StringDType], np.array(["a"], "T")) +AR_U: np.ndarray[Any, np.dtype[np.str_]] = np.array(["a"], "U") AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] = np.array([np.timedelta64("1")]) AR_M: np.ndarray[Any, np.dtype[np.datetime64]] = np.array([np.datetime64("1")]) AR_O: np.ndarray[Any, np.dtype[np.object_]] = np.array([1], dtype=object) @@ -66,6 +69,17 @@ AR_c > AR_f AR_c > AR_c +AR_S > AR_S +AR_S > b"" + +AR_T > AR_T +AR_T > AR_U +AR_T > "" + +AR_U > AR_U +AR_U > AR_T +AR_U > "" + AR_m > AR_b AR_m > AR_u AR_m > AR_i From cbafc53294ee1a4d95fce7dee38a854b2d431623 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 17 Apr 2025 16:35:33 +0200 Subject: [PATCH 168/187] TYP: some ``[arg]partition`` fixes --- numpy/__init__.pyi | 49 ++++++++++++++++++++++++++++--------- numpy/_core/fromnumeric.pyi | 35 +++++++++++++++++--------- 2 files changed, 60 insertions(+), 24 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index afc93ed40d2b..fac2e8ec3a1b 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2293,14 +2293,47 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): where: _ArrayLikeBool_co = True, ) -> _ArrayT: ... 
+ # + @overload + def partition( + self, + /, + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> None: ... + @overload + def partition( + self: NDArray[void], + /, + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> None: ... + + # + @overload def argpartition( self, - kth: _ArrayLikeInt_co, - axis: None | SupportsIndex = ..., - kind: _PartitionKind = ..., - order: None | str | Sequence[str] = ..., + /, + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> NDArray[intp]: ... + @overload + def argpartition( + self: NDArray[void], + /, + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, ) -> NDArray[intp]: ... + # def diagonal( self, offset: SupportsIndex = ..., @@ -2320,14 +2353,6 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): # `nonzero()` is deprecated for 0d arrays/generics def nonzero(self) -> tuple[NDArray[intp], ...]: ... - def partition( - self, - kth: _ArrayLikeInt_co, - axis: SupportsIndex = ..., - kind: _PartitionKind = ..., - order: None | str | Sequence[str] = ..., - ) -> None: ... - # `put` is technically available to `generic`, # but is pointless as `generic`s are immutable def put(self, /, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... 
diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 52f48efa9345..48648593d72f 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -47,11 +47,11 @@ from numpy._typing import ( _ShapeLike, _ArrayLikeBool_co, _ArrayLikeUInt_co, + _ArrayLikeInt, _ArrayLikeInt_co, _ArrayLikeFloat_co, _ArrayLikeComplex_co, _ArrayLikeObject_co, - _ArrayLikeTD64_co, _IntLike_co, _BoolLike_co, _ComplexLike_co, @@ -322,31 +322,42 @@ def matrix_transpose(x: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ... @overload def matrix_transpose(x: ArrayLike, /) -> NDArray[Any]: ... +# @overload def partition( a: _ArrayLike[_SCT], - kth: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., - kind: _PartitionKind = ..., - order: str | Sequence[str] | None = ..., + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: None = None, ) -> NDArray[_SCT]: ... @overload +def partition( + a: _ArrayLike[np.void], + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, +) -> NDArray[np.void]: ... +@overload def partition( a: ArrayLike, - kth: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., - kind: _PartitionKind = ..., - order: str | Sequence[str] | None = ..., + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, ) -> NDArray[Any]: ... +# def argpartition( a: ArrayLike, - kth: _ArrayLikeInt_co, + kth: _ArrayLikeInt, axis: SupportsIndex | None = -1, - kind: _PartitionKind = ..., - order: str | Sequence[str] | None = ..., + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, ) -> NDArray[intp]: ... 
+# @overload def sort( a: _ArrayLike[_SCT], From b0c01d5592763defcbe216edc4a28ceb0cb7ee89 Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 16 Apr 2025 18:41:55 +0200 Subject: [PATCH 169/187] TYP: fix incorrect `random.Generator.integers` return type --- numpy/random/_generator.pyi | 474 +++++++++++++--------- numpy/typing/tests/data/reveal/random.pyi | 4 +- 2 files changed, 274 insertions(+), 204 deletions(-) diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index 84b97883223d..7ed4a959625f 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -1,29 +1,17 @@ from collections.abc import Callable -from typing import Any, TypeAlias, overload, TypeVar, Literal +from typing import Any, Literal, TypeAlias, TypeVar, overload import numpy as np -from numpy import ( - dtype, - float32, - float64, - int8, - int16, - int32, - int64, - int_, - uint, - uint8, - uint16, - uint32, - uint64, -) -from numpy.random import BitGenerator, SeedSequence, RandomState +from numpy import dtype, float32, float64, int64 from numpy._typing import ( ArrayLike, + DTypeLike, NDArray, _ArrayLikeFloat_co, _ArrayLikeInt_co, + _BoolCodes, _DoubleCodes, + _DTypeLike, _DTypeLikeBool, _Float32Codes, _Float64Codes, @@ -32,7 +20,7 @@ from numpy._typing import ( _Int16Codes, _Int32Codes, _Int64Codes, - _IntCodes, + _IntPCodes, _ShapeLike, _SingleCodes, _SupportsDType, @@ -40,10 +28,11 @@ from numpy._typing import ( _UInt16Codes, _UInt32Codes, _UInt64Codes, - _UIntCodes, + _UIntPCodes, ) +from numpy.random import BitGenerator, RandomState, SeedSequence -_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) +_IntegerT = TypeVar("_IntegerT", bound=np.integer) _DTypeLikeFloat32: TypeAlias = ( dtype[float32] @@ -198,249 +187,296 @@ class Generator: ) -> float: ... # type: ignore[misc] @overload def beta( - self, a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, size: None | _ShapeLike = ... 
+ self, + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + size: None | _ShapeLike = ... ) -> NDArray[float64]: ... @overload def exponential(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc] @overload - def exponential( - self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... - ) -> NDArray[float64]: ... + def exponential(self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ...) -> NDArray[float64]: ... + + # @overload - def integers( # type: ignore[misc] + def integers( self, low: int, - high: None | int = ..., - size: None = ..., - *, - endpoint: bool = ..., - ) -> int: ... + high: int | None = None, + size: None = None, + dtype: _DTypeLike[np.int64] | _Int64Codes = ..., + endpoint: bool = False, + ) -> np.int64: ... @overload - def integers( # type: ignore[misc] + def integers( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: type[bool] = ..., - endpoint: bool = ..., + high: int | None = None, + size: None = None, + *, + dtype: type[bool], + endpoint: bool = False, ) -> bool: ... @overload - def integers( # type: ignore[misc] + def integers( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: type[np.bool] = ..., - endpoint: bool = ..., - ) -> np.bool: ... + high: int | None = None, + size: None = None, + *, + dtype: type[int], + endpoint: bool = False, + ) -> int: ... @overload - def integers( # type: ignore[misc] + def integers( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: type[int] = ..., - endpoint: bool = ..., - ) -> int: ... + high: int | None = None, + size: None = None, + *, + dtype: _DTypeLike[np.bool] | _BoolCodes, + endpoint: bool = False, + ) -> np.bool: ... @overload - def integers( # type: ignore[misc] + def integers( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., - endpoint: bool = ..., - ) -> uint8: ... 
+ high: int | None = None, + size: None = None, + *, + dtype: _DTypeLike[_IntegerT], + endpoint: bool = False, + ) -> _IntegerT: ... @overload - def integers( # type: ignore[misc] + def integers( self, - low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., - endpoint: bool = ..., - ) -> uint16: ... + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: _DTypeLike[np.int64] | _Int64Codes = ..., + endpoint: bool = False, + ) -> NDArray[np.int64]: ... @overload - def integers( # type: ignore[misc] + def integers( self, - low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., - endpoint: bool = ..., - ) -> uint32: ... + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _DTypeLikeBool, + endpoint: bool = False, + ) -> NDArray[np.bool]: ... @overload - def integers( # type: ignore[misc] + def integers( self, - low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., - endpoint: bool = ..., - ) -> uint: ... + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _DTypeLike[_IntegerT], + endpoint: bool = False, + ) -> NDArray[_IntegerT]: ... @overload - def integers( # type: ignore[misc] + def integers( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., - endpoint: bool = ..., - ) -> uint64: ... + high: int | None = None, + size: None = None, + *, + dtype: _Int8Codes, + endpoint: bool = False, + ) -> np.int8: ... 
@overload - def integers( # type: ignore[misc] + def integers( self, - low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., - endpoint: bool = ..., - ) -> int8: ... + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _Int8Codes, + endpoint: bool = False, + ) -> NDArray[np.int8]: ... @overload - def integers( # type: ignore[misc] + def integers( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., - endpoint: bool = ..., - ) -> int16: ... + high: int | None = None, + size: None = None, + *, + dtype: _UInt8Codes, + endpoint: bool = False, + ) -> np.uint8: ... @overload - def integers( # type: ignore[misc] + def integers( self, - low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., - endpoint: bool = ..., - ) -> int32: ... + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _UInt8Codes, + endpoint: bool = False, + ) -> NDArray[np.uint8]: ... @overload - def integers( # type: ignore[misc] + def integers( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., - endpoint: bool = ..., - ) -> int_: ... + high: int | None = None, + size: None = None, + *, + dtype: _Int16Codes, + endpoint: bool = False, + ) -> np.int16: ... @overload - def integers( # type: ignore[misc] + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _Int16Codes, + endpoint: bool = False, + ) -> NDArray[np.int16]: ... 
+ @overload + def integers( self, low: int, - high: None | int = ..., - size: None = ..., - dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., - endpoint: bool = ..., - ) -> int64: ... + high: int | None = None, + size: None = None, + *, + dtype: _UInt16Codes, + endpoint: bool = False, + ) -> np.uint16: ... @overload - def integers( # type: ignore[misc] + def integers( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, *, - endpoint: bool = ... - ) -> NDArray[int64]: ... + dtype: _UInt16Codes, + endpoint: bool = False, + ) -> NDArray[np.uint16]: ... @overload - def integers( # type: ignore[misc] + def integers( self, - low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: _DTypeLikeBool = ..., - endpoint: bool = ..., - ) -> NDArray[np.bool]: ... + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _Int32Codes, + endpoint: bool = False, + ) -> np.int32: ... @overload - def integers( # type: ignore[misc] + def integers( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., - endpoint: bool = ..., - ) -> NDArray[int8]: ... + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _Int32Codes, + endpoint: bool = False, + ) -> NDArray[np.int32]: ... @overload - def integers( # type: ignore[misc] + def integers( self, - low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., - endpoint: bool = ..., - ) -> NDArray[int16]: ... + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _UInt32Codes, + endpoint: bool = False, + ) -> np.uint32: ... 
@overload - def integers( # type: ignore[misc] + def integers( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., - endpoint: bool = ..., - ) -> NDArray[int32]: ... + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _UInt32Codes, + endpoint: bool = False, + ) -> NDArray[np.uint32]: ... @overload - def integers( # type: ignore[misc] + def integers( self, - low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: None | dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., - endpoint: bool = ..., - ) -> NDArray[int64]: ... + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _UInt64Codes, + endpoint: bool = False, + ) -> np.uint64: ... @overload - def integers( # type: ignore[misc] + def integers( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., - endpoint: bool = ..., - ) -> NDArray[uint8]: ... + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _UInt64Codes, + endpoint: bool = False, + ) -> NDArray[np.uint64]: ... @overload - def integers( # type: ignore[misc] + def integers( self, - low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., - endpoint: bool = ..., - ) -> NDArray[uint16]: ... + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _IntPCodes, + endpoint: bool = False, + ) -> np.intp: ... 
@overload - def integers( # type: ignore[misc] + def integers( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., - endpoint: bool = ..., - ) -> NDArray[uint32]: ... + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _IntPCodes, + endpoint: bool = False, + ) -> NDArray[np.intp]: ... @overload - def integers( # type: ignore[misc] + def integers( self, - low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., - endpoint: bool = ..., - ) -> NDArray[uint64]: ... + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _UIntPCodes, + endpoint: bool = False, + ) -> np.uintp: ... @overload - def integers( # type: ignore[misc] + def integers( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., - endpoint: bool = ..., - ) -> NDArray[int_]: ... + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _UIntPCodes, + endpoint: bool = False, + ) -> NDArray[np.uintp]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + dtype: DTypeLike = ..., + endpoint: bool = False, + ) -> Any: ... @overload - def integers( # type: ignore[misc] + def integers( self, low: _ArrayLikeInt_co, - high: None | _ArrayLikeInt_co = ..., - size: None | _ShapeLike = ..., - dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., - endpoint: bool = ..., - ) -> NDArray[uint]: ... - # TODO: Use a TypeVar _T here to get away from Any output? 
Should be int->NDArray[int64], ArrayLike[_T] -> _T | NDArray[Any] + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: DTypeLike = ..., + endpoint: bool = False, + ) -> NDArray[Any]: ... + + # TODO: Use a TypeVar _T here to get away from Any output? + # Should be int->NDArray[int64], ArrayLike[_T] -> _T | NDArray[Any] @overload def choice( self, @@ -547,7 +583,9 @@ class Generator: out: None | NDArray[float64] = ..., ) -> NDArray[float64]: ... @overload - def gamma(self, shape: _FloatLike_co, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc] + def gamma( + self, shape: _FloatLike_co, scale: _FloatLike_co = ..., size: None = ... + ) -> float: ... # type: ignore[misc] @overload def gamma( self, @@ -556,13 +594,23 @@ class Generator: size: None | _ShapeLike = ..., ) -> NDArray[float64]: ... @overload - def f(self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + def f( + self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = ... + ) -> float: ... # type: ignore[misc] @overload def f( - self, dfnum: _ArrayLikeFloat_co, dfden: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, + dfnum: _ArrayLikeFloat_co, + dfden: _ArrayLikeFloat_co, + size: None | _ShapeLike = ... ) -> NDArray[float64]: ... @overload - def noncentral_f(self, dfnum: _FloatLike_co, dfden: _FloatLike_co, nonc: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + def noncentral_f( + self, + dfnum: _FloatLike_co, + dfden: _FloatLike_co, + nonc: _FloatLike_co, size: None = ... + ) -> float: ... # type: ignore[misc] @overload def noncentral_f( self, @@ -578,10 +626,15 @@ class Generator: self, df: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> NDArray[float64]: ... @overload - def noncentral_chisquare(self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = ...) -> float: ... 
# type: ignore[misc] + def noncentral_chisquare( + self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = ... + ) -> float: ... # type: ignore[misc] @overload def noncentral_chisquare( - self, df: _ArrayLikeFloat_co, nonc: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, + df: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: None | _ShapeLike = ... ) -> NDArray[float64]: ... @overload def standard_t(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] @@ -594,10 +647,15 @@ class Generator: self, df: _ArrayLikeFloat_co, size: _ShapeLike = ... ) -> NDArray[float64]: ... @overload - def vonmises(self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + def vonmises( + self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = ... + ) -> float: ... # type: ignore[misc] @overload def vonmises( - self, mu: _ArrayLikeFloat_co, kappa: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, + mu: _ArrayLikeFloat_co, + kappa: _ArrayLikeFloat_co, + size: None | _ShapeLike = ... ) -> NDArray[float64]: ... @overload def pareto(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] @@ -684,10 +742,15 @@ class Generator: self, scale: _ArrayLikeFloat_co = ..., size: None | _ShapeLike = ... ) -> NDArray[float64]: ... @overload - def wald(self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + def wald( + self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = ... + ) -> float: ... # type: ignore[misc] @overload def wald( - self, mean: _ArrayLikeFloat_co, scale: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, + mean: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co, + size: None | _ShapeLike = ... ) -> NDArray[float64]: ... @overload def triangular( @@ -712,10 +775,15 @@ class Generator: self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> NDArray[int64]: ... 
@overload - def negative_binomial(self, n: _FloatLike_co, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + def negative_binomial( + self, n: _FloatLike_co, p: _FloatLike_co, size: None = ... + ) -> int: ... # type: ignore[misc] @overload def negative_binomial( - self, n: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... + self, + n: _ArrayLikeFloat_co, + p: _ArrayLikeFloat_co, + size: None | _ShapeLike = ... ) -> NDArray[int64]: ... @overload def poisson(self, lam: _FloatLike_co = ..., size: None = ...) -> int: ... # type: ignore[misc] @@ -736,7 +804,9 @@ class Generator: self, p: _ArrayLikeFloat_co, size: None | _ShapeLike = ... ) -> NDArray[int64]: ... @overload - def hypergeometric(self, ngood: int, nbad: int, nsample: int, size: None = ...) -> int: ... # type: ignore[misc] + def hypergeometric( + self, ngood: int, nbad: int, nsample: int, size: None = ... + ) -> int: ... # type: ignore[misc] @overload def hypergeometric( self, diff --git a/numpy/typing/tests/data/reveal/random.pyi b/numpy/typing/tests/data/reveal/random.pyi index 03b0712d8c77..4c1c8abd927c 100644 --- a/numpy/typing/tests/data/reveal/random.pyi +++ b/numpy/typing/tests/data/reveal/random.pyi @@ -504,8 +504,8 @@ assert_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, I_int64_100: npt.NDArray[np.int64] = np.array([100], dtype=np.int64) -assert_type(def_gen.integers(0, 100), int) -assert_type(def_gen.integers(100), int) +assert_type(def_gen.integers(0, 100), np.int64) +assert_type(def_gen.integers(100), np.int64) assert_type(def_gen.integers([100]), npt.NDArray[np.int64]) assert_type(def_gen.integers(0, [100]), npt.NDArray[np.int64]) From 8adcf87fa8a57be3dc208997ec02b03355c300bd Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 17 Apr 2025 18:42:51 +0200 Subject: [PATCH 170/187] TYP: fix `count_nonzero` signature --- numpy/_core/numeric.pyi | 22 ++++++++++------------ numpy/typing/tests/data/reveal/numeric.pyi | 2 +- 2 files 
changed, 11 insertions(+), 13 deletions(-) diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index a5af4d372968..a31e03cd3a38 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -109,7 +109,7 @@ from numpy._typing import ( _ArrayLikeComplex_co, _ArrayLikeTD64_co, _ArrayLikeObject_co, - _ArrayLikeUnknown, + _NestedSequence, ) __all__ = [ @@ -456,21 +456,19 @@ def full_like( device: None | L["cpu"] = ..., ) -> NDArray[Any]: ... +# @overload -def count_nonzero( - a: ArrayLike, - axis: None = ..., - *, - keepdims: L[False] = ..., -) -> int: ... +def count_nonzero(a: ArrayLike, axis: None = None, *, keepdims: L[False] = False) -> int: ... +@overload +def count_nonzero(a: _ScalarLike_co, axis: _ShapeLike | None = None, *, keepdims: L[True]) -> np.intp: ... @overload def count_nonzero( - a: ArrayLike, - axis: _ShapeLike = ..., - *, - keepdims: bool = ..., -) -> Any: ... # TODO: np.intp or ndarray[np.intp] + a: NDArray[Any] | _NestedSequence[ArrayLike], axis: _ShapeLike | None = None, *, keepdims: L[True] +) -> NDArray[np.intp]: ... +@overload +def count_nonzero(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: bool = False) -> Any: ... +# def isfortran(a: NDArray[Any] | generic) -> bool: ... def argwhere(a: ArrayLike) -> NDArray[intp]: ... 
diff --git a/numpy/typing/tests/data/reveal/numeric.pyi b/numpy/typing/tests/data/reveal/numeric.pyi index 742ec2a4c827..90e6674a85e3 100644 --- a/numpy/typing/tests/data/reveal/numeric.pyi +++ b/numpy/typing/tests/data/reveal/numeric.pyi @@ -31,7 +31,7 @@ C: SubClass assert_type(np.count_nonzero(i8), int) assert_type(np.count_nonzero(AR_i8), int) assert_type(np.count_nonzero(B), int) -assert_type(np.count_nonzero(AR_i8, keepdims=True), Any) +assert_type(np.count_nonzero(AR_i8, keepdims=True), npt.NDArray[np.intp]) assert_type(np.count_nonzero(AR_i8, axis=0), Any) assert_type(np.isfortran(i8), bool) From 9005fc195c3d558e3b7462ab9f15a465458afdb5 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 19 Apr 2025 06:30:32 -0600 Subject: [PATCH 171/187] TYP: Update numpy/_core/numeric.pyi Co-authored-by: Joren Hammudoglu --- numpy/_core/numeric.pyi | 1 + 1 file changed, 1 insertion(+) diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index a31e03cd3a38..23e8a95878bb 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -109,6 +109,7 @@ from numpy._typing import ( _ArrayLikeComplex_co, _ArrayLikeTD64_co, _ArrayLikeObject_co, + _ArrayLikeUnknown, _NestedSequence, ) From d561f093770d1207b37a9b53203b92e10a912484 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 19 Apr 2025 08:50:29 -0600 Subject: [PATCH 172/187] REL: Prepare for the NumPy 2.2.5 release [wheel build] - Create 2.2.5-changelog.rst. - Update 2.2.5-notes.rst. - Update .mailmap. 
--- .mailmap | 1 + doc/changelog/2.2.5-changelog.rst | 39 +++++++++++++++++++++++++++ doc/source/release/2.2.5-notes.rst | 43 ++++++++++++++++++++++++++++++ 3 files changed, 83 insertions(+) create mode 100644 doc/changelog/2.2.5-changelog.rst diff --git a/.mailmap b/.mailmap index 1ae0bce7f11a..f33dfddb6492 100644 --- a/.mailmap +++ b/.mailmap @@ -152,6 +152,7 @@ Ashutosh Singh <55102089+Ashutosh619-sudo@users.nor Auke Wiggers Austin Ran <504977925@qq.com> Badhri Narayanan Krishnakumar +Baskar Gopinath Bhavuk Kalra Bhavuk Kalra Bangcheng Yang diff --git a/doc/changelog/2.2.5-changelog.rst b/doc/changelog/2.2.5-changelog.rst new file mode 100644 index 000000000000..409c243d148e --- /dev/null +++ b/doc/changelog/2.2.5-changelog.rst @@ -0,0 +1,39 @@ + +Contributors +============ + +A total of 7 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Joren Hammudoglu +* Baskar Gopinath + +* Nathan Goldbaum +* Nicholas Christensen + +* Sayed Adel +* karl + + +Pull requests merged +==================== + +A total of 19 pull requests were merged for this release. + +* `#28545 `__: MAINT: Prepare 2.2.x for further development +* `#28582 `__: BUG: Fix return type of NpyIter_GetIterNext in Cython declarations +* `#28583 `__: BUG: avoid deadlocks with C++ shared mutex in dispatch cache +* `#28585 `__: TYP: fix typing errors in ``_core.strings`` +* `#28631 `__: MAINT, CI: Update Ubuntu to 22.04 in azure-pipelines +* `#28632 `__: BUG: Set writeable flag for writeable dlpacks. +* `#28633 `__: BUG: Fix crackfortran parsing error when a division occurs within... 
+* `#28650 `__: TYP: fix ``ndarray.tolist()`` and ``.item()`` for unknown dtype +* `#28654 `__: BUG: fix deepcopying StringDType arrays (#28643) +* `#28661 `__: TYP: Accept objects that ``write()`` to ``str`` in ``savetxt`` +* `#28663 `__: CI: Replace QEMU armhf with native (32-bit compatibility mode) +* `#28682 `__: SIMD: Resolve Highway QSort symbol linking error on aarch32/ASIMD +* `#28683 `__: TYP: add missing ``"b1"`` literals for ``dtype[bool]`` +* `#28705 `__: TYP: Fix false rejection of ``NDArray[object_].__abs__()`` +* `#28706 `__: TYP: Fix inconsistent ``NDArray[float64].__[r]truediv__`` return... +* `#28723 `__: TYP: fix string-like ``ndarray`` rich comparison operators +* `#28758 `__: TYP: some ``[arg]partition`` fixes +* `#28772 `__: TYP: fix incorrect ``random.Generator.integers`` return type +* `#28774 `__: TYP: fix ``count_nonzero`` signature diff --git a/doc/source/release/2.2.5-notes.rst b/doc/source/release/2.2.5-notes.rst index 3fa8dc85abe6..e1c3205b006d 100644 --- a/doc/source/release/2.2.5-notes.rst +++ b/doc/source/release/2.2.5-notes.rst @@ -5,6 +5,49 @@ NumPy 2.2.5 Release Notes ========================== NumPy 2.2.5 is a patch release that fixes bugs found after the 2.2.4 release. +It has a large number of typing fixes/improvements as well as the normal bug +fixes and some CI maintenance. This release supports Python versions 3.10-3.13. + +Contributors +============ + +A total of 7 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Joren Hammudoglu +* Baskar Gopinath + +* Nathan Goldbaum +* Nicholas Christensen + +* Sayed Adel +* karl + + + +Pull requests merged +==================== + +A total of 19 pull requests were merged for this release. 
+ +* `#28545 `__: MAINT: Prepare 2.2.x for further development +* `#28582 `__: BUG: Fix return type of NpyIter_GetIterNext in Cython declarations +* `#28583 `__: BUG: avoid deadlocks with C++ shared mutex in dispatch cache +* `#28585 `__: TYP: fix typing errors in ``_core.strings`` +* `#28631 `__: MAINT, CI: Update Ubuntu to 22.04 in azure-pipelines +* `#28632 `__: BUG: Set writeable flag for writeable dlpacks. +* `#28633 `__: BUG: Fix crackfortran parsing error when a division occurs within... +* `#28650 `__: TYP: fix ``ndarray.tolist()`` and ``.item()`` for unknown dtype +* `#28654 `__: BUG: fix deepcopying StringDType arrays (#28643) +* `#28661 `__: TYP: Accept objects that ``write()`` to ``str`` in ``savetxt`` +* `#28663 `__: CI: Replace QEMU armhf with native (32-bit compatibility mode) +* `#28682 `__: SIMD: Resolve Highway QSort symbol linking error on aarch32/ASIMD +* `#28683 `__: TYP: add missing ``"b1"`` literals for ``dtype[bool]`` +* `#28705 `__: TYP: Fix false rejection of ``NDArray[object_].__abs__()`` +* `#28706 `__: TYP: Fix inconsistent ``NDArray[float64].__[r]truediv__`` return... 
+* `#28723 `__: TYP: fix string-like ``ndarray`` rich comparison operators +* `#28758 `__: TYP: some ``[arg]partition`` fixes +* `#28772 `__: TYP: fix incorrect ``random.Generator.integers`` return type +* `#28774 `__: TYP: fix ``count_nonzero`` signature + From 37c6b1f5ef27f204973d85b4c056f5f155b65bc8 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 19 Apr 2025 17:43:26 -0600 Subject: [PATCH 173/187] MAINT: Prepare 2.2.x for further development - Create 2.2.6-notes.rst - Update release.rst - Update pavement.py - Update pyproject.toml [skip azp] [skip cirrus] [skip actions] --- doc/source/release.rst | 1 + doc/source/release/2.2.6-notes.rst | 13 +++++++++++++ pavement.py | 2 +- pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) create mode 100644 doc/source/release/2.2.6-notes.rst diff --git a/doc/source/release.rst b/doc/source/release.rst index c9d9b3bf36e7..559a89110a2b 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,7 @@ Release notes .. toctree:: :maxdepth: 2 + 2.2.6 2.2.5 2.2.4 2.2.3 diff --git a/doc/source/release/2.2.6-notes.rst b/doc/source/release/2.2.6-notes.rst new file mode 100644 index 000000000000..572f0d976c03 --- /dev/null +++ b/doc/source/release/2.2.6-notes.rst @@ -0,0 +1,13 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.2.6 Release Notes +========================== + +NumPy 2.2.6 is a patch release that fixes bugs found after the 2.2.5 release. +It has a large number of typing fixes/improvements as well as the normal bug +fixes and some CI maintenance. + +This release supports Python versions 3.10-3.13. 
+ + diff --git a/pavement.py b/pavement.py index 4caf4ba11a23..fbe3071137c3 100644 --- a/pavement.py +++ b/pavement.py @@ -36,7 +36,7 @@ #----------------------------------- # Path to the release notes -RELEASE_NOTES = 'doc/source/release/2.2.5-notes.rst' +RELEASE_NOTES = 'doc/source/release/2.2.6-notes.rst' #------------------------------------------------------- diff --git a/pyproject.toml b/pyproject.toml index a9223b22b326..c4d3e99cd77e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.2.5" +version = "2.2.6" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"} From c8d7ace4ba76b247772d62130259bb1c27ce415a Mon Sep 17 00:00:00 2001 From: Sayed Adel Date: Sat, 26 Apr 2025 19:29:36 +0300 Subject: [PATCH 174/187] Update vendor-meson to fix module_feature conflicts arguments bug Fix incorrect handling of the compiler arguments 'conflicts' that was causing failures when detecting asmid instructions at compile time on aarch32 platforms. 
--- vendored-meson/meson | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendored-meson/meson b/vendored-meson/meson index 0d93515fb826..7300f5fd4c1c 160000 --- a/vendored-meson/meson +++ b/vendored-meson/meson @@ -1 +1 @@ -Subproject commit 0d93515fb826440d19707eee47fd92655fe2f166 +Subproject commit 7300f5fd4c1c8b0406faeec4cc631f11f1ea324c From ee2a49cec5cd5af647404167e3f3289589445460 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Wed, 23 Apr 2025 11:51:54 -0600 Subject: [PATCH 175/187] BUG: fix heap buffer overflow in np.strings.find --- numpy/_core/src/umath/string_fastsearch.h | 12 ++---------- numpy/_core/tests/test_strings.py | 2 ++ 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/numpy/_core/src/umath/string_fastsearch.h b/numpy/_core/src/umath/string_fastsearch.h index 54092d8b293d..95d0ee4fb214 100644 --- a/numpy/_core/src/umath/string_fastsearch.h +++ b/numpy/_core/src/umath/string_fastsearch.h @@ -670,16 +670,8 @@ preprocess(CheckedIndexer needle, Py_ssize_t len_needle, assert(p->period + p->cut <= len_needle); // Compare parts of the needle to check for periodicity. - int cmp; - if (std::is_same::value) { - cmp = memcmp(needle.buffer, - needle.buffer + (p->period * sizeof(npy_ucs4)), - (size_t) p->cut); - } - else { - cmp = memcmp(needle.buffer, needle.buffer + p->period, - (size_t) p->cut); - } + int cmp = memcmp(needle.buffer, needle.buffer + p->period, + (size_t) p->cut); p->is_periodic = (0 == cmp); // If periodic, gap is unused; otherwise, calculate period and gap. 
diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index 9fe4c2693599..1aca4102d188 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -381,6 +381,8 @@ def test_str_len(self, in_, out, dt): None, [3, -1]), ("Ae¢☃€ 😊" * 2, "😊", 0, None, 6), ("Ae¢☃€ 😊" * 2, "😊", 7, None, 13), + pytest.param("A" * (2 ** 17), r"[\w]+\Z", 0, None, -1, + id=r"A*2**17-[\w]+\Z-0-None--1"), ]) def test_find(self, a, sub, start, end, out, dt): if "😊" in a and dt == "S": From a46d745ce3569ed110d49d6c513888a5369e181b Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 24 Apr 2025 02:15:50 +0200 Subject: [PATCH 176/187] TYP: fix ``NDArray[floating] + float`` return type --- numpy/__init__.pyi | 52 +++++++++---------- numpy/typing/tests/data/reveal/arithmetic.pyi | 23 +++++++- 2 files changed, 48 insertions(+), 27 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index fac2e8ec3a1b..37c502852bd0 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2734,7 +2734,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __mod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] @overload - def __mod__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __mod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __mod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload @@ -2759,7 +2759,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __rmod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] @overload - def __rmod__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... 
+ def __rmod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __rmod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload @@ -2784,7 +2784,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __divmod__(self: NDArray[np.bool], rhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] @overload - def __divmod__(self: NDArray[floating[_64Bit]], rhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... + def __divmod__(self: NDArray[float64], rhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... @overload def __divmod__(self: _ArrayFloat64_co, rhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ... @overload @@ -2805,7 +2805,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap] @overload - def __rdivmod__(self: NDArray[floating[_64Bit]], lhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... + def __rdivmod__(self: NDArray[float64], lhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ... @overload def __rdivmod__(self: _ArrayFloat64_co, lhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ... @overload @@ -2826,11 +2826,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __add__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __add__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __add__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __add__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... 
@overload - def __add__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + def __add__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload def __add__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload @@ -2863,11 +2863,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __radd__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __radd__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __radd__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __radd__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __radd__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + def __radd__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload def __radd__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload @@ -2900,11 +2900,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __sub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __sub__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __sub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __sub__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __sub__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... 
+ def __sub__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload def __sub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload @@ -2937,11 +2937,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __rsub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __rsub__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __rsub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __rsub__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rsub__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + def __rsub__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload def __rsub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload @@ -2974,11 +2974,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __mul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __mul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __mul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __mul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __mul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + def __mul__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... 
@overload def __mul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload @@ -3009,11 +3009,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __rmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __rmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __rmul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __rmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rmul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + def __rmul__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload def __rmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload @@ -3036,11 +3036,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __truediv__(self: _ArrayInt_co | NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __truediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __truediv__(self: _ArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __truediv__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + def __truediv__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload def __truediv__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... 
@overload @@ -3067,11 +3067,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rtruediv__(self: _ArrayInt_co | NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __rtruediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __rtruediv__(self: _ArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rtruediv__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + def __rtruediv__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload def __rtruediv__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload @@ -3104,7 +3104,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __floordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] @overload - def __floordiv__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __floordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __floordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload @@ -3133,7 +3133,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] @overload - def __rfloordiv__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __rfloordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... 
@overload def __rfloordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload @@ -3160,11 +3160,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __pow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __pow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __pow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __pow__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + def __pow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload def __pow__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... @overload @@ -3191,11 +3191,11 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __rpow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __rpow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... @overload def __rpow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... @overload - def __rpow__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + def __rpow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... @overload def __rpow__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... 
@overload diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi index 61e8948de5be..c0b94bae08a1 100644 --- a/numpy/typing/tests/data/reveal/arithmetic.pyi +++ b/numpy/typing/tests/data/reveal/arithmetic.pyi @@ -50,7 +50,8 @@ AR_c: npt.NDArray[np.complex128] AR_m: npt.NDArray[np.timedelta64] AR_M: npt.NDArray[np.datetime64] AR_O: npt.NDArray[np.object_] -AR_number: npt.NDArray[np.number[Any]] +AR_floating: npt.NDArray[np.floating] +AR_number: npt.NDArray[np.number] AR_Any: npt.NDArray[Any] AR_LIKE_b: list[bool] @@ -654,3 +655,23 @@ assert_type(AR_f + u4, npt.NDArray[np.float64]) # Any assert_type(AR_Any + 2, npt.NDArray[Any]) + +# regression tests for https://github.com/numpy/numpy/issues/28805 + +assert_type(AR_floating + f, npt.NDArray[np.floating]) +assert_type(AR_floating - f, npt.NDArray[np.floating]) +assert_type(AR_floating * f, npt.NDArray[np.floating]) +assert_type(AR_floating ** f, npt.NDArray[np.floating]) +assert_type(AR_floating / f, npt.NDArray[np.floating]) +assert_type(AR_floating // f, npt.NDArray[np.floating]) +assert_type(AR_floating % f, npt.NDArray[np.floating]) +assert_type(divmod(AR_floating, f), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) + +assert_type(f + AR_floating, npt.NDArray[np.floating]) +assert_type(f - AR_floating, npt.NDArray[np.floating]) +assert_type(f * AR_floating, npt.NDArray[np.floating]) +assert_type(f ** AR_floating, npt.NDArray[np.floating]) +assert_type(f / AR_floating, npt.NDArray[np.floating]) +assert_type(f // AR_floating, npt.NDArray[np.floating]) +assert_type(f % AR_floating, npt.NDArray[np.floating]) +assert_type(divmod(f, AR_floating), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) From d4a3731dbfc4afba523f190dbf9dbe68595aee90 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 29 Apr 2025 08:33:12 -0600 Subject: [PATCH 177/187] BUG: set the array_owned flag on the StringDType singleton --- 
numpy/_core/src/multiarray/stringdtype/dtype.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index cb8265dd3d7a..90efc9667f55 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -852,14 +852,17 @@ init_string_dtype(void) return -1; } - PyArray_Descr *singleton = - NPY_DT_CALL_default_descr(&PyArray_StringDType); + PyArray_StringDTypeObject *singleton = + (PyArray_StringDTypeObject *)NPY_DT_CALL_default_descr(&PyArray_StringDType); if (singleton == NULL) { return -1; } - PyArray_StringDType.singleton = singleton; + // never associate the singleton with an array + singleton->array_owned = 1; + + PyArray_StringDType.singleton = (PyArray_Descr *)singleton; PyArray_StringDType.type_num = NPY_VSTRING; for (int i = 0; PyArray_StringDType_casts[i] != NULL; i++) { From 71e2b9c10dd36bf90bd3d23b9a823198a31d90df Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 29 Apr 2025 08:33:35 -0600 Subject: [PATCH 178/187] ENH: acquire the allocator lock when setting the array_owned flag --- numpy/_core/src/multiarray/stringdtype/dtype.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 90efc9667f55..0599a48ad9af 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -633,11 +633,16 @@ PyArray_Descr * stringdtype_finalize_descr(PyArray_Descr *dtype) { PyArray_StringDTypeObject *sdtype = (PyArray_StringDTypeObject *)dtype; + // acquire the allocator lock in case the descriptor we want to finalize + // is shared between threads, see gh-28813 + npy_string_allocator *allocator = NpyString_acquire_allocator(sdtype); if (sdtype->array_owned == 0) { sdtype->array_owned = 1; + NpyString_release_allocator(allocator); Py_INCREF(dtype); 
return dtype; } + NpyString_release_allocator(allocator); PyArray_StringDTypeObject *ret = (PyArray_StringDTypeObject *)new_stringdtype_instance( sdtype->na_object, sdtype->coerce); ret->array_owned = 1; From 48b50f90d82b99cdbba7c7428eb49ea61dd757c2 Mon Sep 17 00:00:00 2001 From: mattip Date: Fri, 14 Feb 2025 14:40:18 +0200 Subject: [PATCH 179/187] use OpenBLAS 0.3.29 --- .github/workflows/linux_musl.yml | 2 -- requirements/ci32_requirements.txt | 2 +- requirements/ci_requirements.txt | 4 ++-- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/.github/workflows/linux_musl.yml b/.github/workflows/linux_musl.yml index 18a6a5eefe4a..67226278171e 100644 --- a/.github/workflows/linux_musl.yml +++ b/.github/workflows/linux_musl.yml @@ -60,8 +60,6 @@ jobs: pip install -r requirements/build_requirements.txt -r requirements/test_requirements.txt # use meson to build and test - # the Duse-ilp64 is not needed with scipy-openblas wheels > 0.3.24.95.0 - # spin build --with-scipy-openblas=64 -- -Duse-ilp64=true spin build --with-scipy-openblas=64 spin test -j auto diff --git a/requirements/ci32_requirements.txt b/requirements/ci32_requirements.txt index 437dbc90a9b7..5a7be719214a 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,3 +1,3 @@ spin==0.13 # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.28.0.2 +scipy-openblas32==0.3.29.0.0 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index ab255e648527..adf7d86558f0 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ spin==0.13 # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.28.0.2 -scipy-openblas64==0.3.28.0.2 +scipy-openblas32==0.3.29.0.0 +scipy-openblas64==0.3.29.0.0 From 770f06d21b1dd83342029350f6c8162dc0a38fbd Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 2 May 2025 08:42:22 -0600 Subject: [PATCH 180/187] MAINT: from_dlpack thread safety 
fixes (#28883) Fixes #28881 Moves the global variables defined in from_dlpack to the npy_static_pydata struct and initializes them during module init. --- numpy/_core/src/multiarray/dlpack.c | 31 +++----------------- numpy/_core/src/multiarray/npy_static_data.c | 16 ++++++++++ numpy/_core/src/multiarray/npy_static_data.h | 7 +++++ 3 files changed, 27 insertions(+), 27 deletions(-) diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c index 4bea7f9fc1ab..ac37a04c30c6 100644 --- a/numpy/_core/src/multiarray/dlpack.c +++ b/numpy/_core/src/multiarray/dlpack.c @@ -504,36 +504,12 @@ from_dlpack(PyObject *NPY_UNUSED(self), return NULL; } - /* Prepare the arguments to call objects __dlpack__() method */ - static PyObject *call_kwnames = NULL; - static PyObject *dl_cpu_device_tuple = NULL; - static PyObject *max_version = NULL; - - if (call_kwnames == NULL) { - call_kwnames = Py_BuildValue("(sss)", "dl_device", "copy", "max_version"); - if (call_kwnames == NULL) { - return NULL; - } - } - if (dl_cpu_device_tuple == NULL) { - dl_cpu_device_tuple = Py_BuildValue("(i,i)", 1, 0); - if (dl_cpu_device_tuple == NULL) { - return NULL; - } - } - if (max_version == NULL) { - max_version = Py_BuildValue("(i,i)", 1, 0); - if (max_version == NULL) { - return NULL; - } - } - /* * Prepare arguments for the full call. We always forward copy and pass * our max_version. `device` is always passed as `None`, but if the user * provided a device, we will replace it with the "cpu": (1, 0). 
*/ - PyObject *call_args[] = {obj, Py_None, copy, max_version}; + PyObject *call_args[] = {obj, Py_None, copy, npy_static_pydata.dl_max_version}; Py_ssize_t nargsf = 1 | PY_VECTORCALL_ARGUMENTS_OFFSET; /* If device is passed it must be "cpu" and replace it with (1, 0) */ @@ -544,12 +520,13 @@ from_dlpack(PyObject *NPY_UNUSED(self), return NULL; } assert(device_request == NPY_DEVICE_CPU); - call_args[1] = dl_cpu_device_tuple; + call_args[1] = npy_static_pydata.dl_cpu_device_tuple; } PyObject *capsule = PyObject_VectorcallMethod( - npy_interned_str.__dlpack__, call_args, nargsf, call_kwnames); + npy_interned_str.__dlpack__, call_args, nargsf, + npy_static_pydata.dl_call_kwnames); if (capsule == NULL) { /* * TODO: This path should be deprecated in NumPy 2.1. Once deprecated diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c index 2cc6ea72c26e..62e1fd3c1b15 100644 --- a/numpy/_core/src/multiarray/npy_static_data.c +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -184,6 +184,22 @@ initialize_static_globals(void) return -1; } + npy_static_pydata.dl_call_kwnames = + Py_BuildValue("(sss)", "dl_device", "copy", "max_version"); + if (npy_static_pydata.dl_call_kwnames == NULL) { + return -1; + } + + npy_static_pydata.dl_cpu_device_tuple = Py_BuildValue("(i,i)", 1, 0); + if (npy_static_pydata.dl_cpu_device_tuple == NULL) { + return -1; + } + + npy_static_pydata.dl_max_version = Py_BuildValue("(i,i)", 1, 0); + if (npy_static_pydata.dl_max_version == NULL) { + return -1; + } + /* * Initialize contents of npy_static_cdata struct * diff --git a/numpy/_core/src/multiarray/npy_static_data.h b/numpy/_core/src/multiarray/npy_static_data.h index d6ee4a8dc54d..287dc80e4c1f 100644 --- a/numpy/_core/src/multiarray/npy_static_data.h +++ b/numpy/_core/src/multiarray/npy_static_data.h @@ -138,6 +138,13 @@ typedef struct npy_static_pydata_struct { PyObject *GenericToVoidMethod; PyObject *ObjectToGenericMethod; PyObject 
*GenericToObjectMethod; + + /* + * Used in from_dlpack + */ + PyObject *dl_call_kwnames; + PyObject *dl_cpu_device_tuple; + PyObject *dl_max_version; } npy_static_pydata_struct; From c906f847f8ebfe0adec896e15d50d0c873d2dae5 Mon Sep 17 00:00:00 2001 From: Marco Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 6 May 2025 09:07:10 +0100 Subject: [PATCH 181/187] TYP: Fix non-existent ``CanIndex`` annotation in ``ndarray.setfield`` --- numpy/__init__.pyi | 2 +- numpy/typing/tests/data/fail/array_like.pyi | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 37c502852bd0..0acb7a565987 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2551,7 +2551,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload # (dtype: ?, type: type[T]) def view(self, /, dtype: DTypeLike, type: type[_ArrayT]) -> _ArrayT: ... - def setfield(self, /, val: ArrayLike, dtype: DTypeLike, offset: CanIndex = 0) -> None: ... + def setfield(self, /, val: ArrayLike, dtype: DTypeLike, offset: SupportsIndex = 0) -> None: ... @overload def getfield(self, dtype: _DTypeLike[_SCT], offset: SupportsIndex = 0) -> NDArray[_SCT]: ... 
@overload diff --git a/numpy/typing/tests/data/fail/array_like.pyi b/numpy/typing/tests/data/fail/array_like.pyi index 133b5fd49700..a21101a993c7 100644 --- a/numpy/typing/tests/data/fail/array_like.pyi +++ b/numpy/typing/tests/data/fail/array_like.pyi @@ -14,3 +14,5 @@ scalar = np.int64(1) scalar.__array__(dtype=np.float64) # E: No overload variant array = np.array([1]) array.__array__(dtype=np.float64) # E: No overload variant + +array.setfield(np.eye(1), np.int32, (0, 1)) # E: No overload variant From 87d1d8a0dc43b240a2c5607b524940114f660b6f Mon Sep 17 00:00:00 2001 From: Ilhan Polat Date: Tue, 6 May 2025 22:14:08 +0200 Subject: [PATCH 182/187] MAINT: Avoid dereferencing/strict aliasing warnings during complex casts in `npy_math.h` for C++ runs (#28892) * MAINT: Avoid type-punning complex casts in npy_math.h * MAINT: Add missing typecast in npy_cimagl --- numpy/_core/include/numpy/npy_math.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/_core/include/numpy/npy_math.h b/numpy/_core/include/numpy/npy_math.h index d11df12b7ceb..abc784bc686c 100644 --- a/numpy/_core/include/numpy/npy_math.h +++ b/numpy/_core/include/numpy/npy_math.h @@ -363,7 +363,7 @@ NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0); static inline double npy_creal(const npy_cdouble z) { #if defined(__cplusplus) - return ((double *) &z)[0]; + return z._Val[0]; #else return creal(z); #endif @@ -377,7 +377,7 @@ static inline void npy_csetreal(npy_cdouble *z, const double r) static inline double npy_cimag(const npy_cdouble z) { #if defined(__cplusplus) - return ((double *) &z)[1]; + return z._Val[1]; #else return cimag(z); #endif @@ -391,7 +391,7 @@ static inline void npy_csetimag(npy_cdouble *z, const double i) static inline float npy_crealf(const npy_cfloat z) { #if defined(__cplusplus) - return ((float *) &z)[0]; + return z._Val[0]; #else return crealf(z); #endif @@ -405,7 +405,7 @@ static inline void npy_csetrealf(npy_cfloat *z, 
const float r) static inline float npy_cimagf(const npy_cfloat z) { #if defined(__cplusplus) - return ((float *) &z)[1]; + return z._Val[1]; #else return cimagf(z); #endif @@ -419,7 +419,7 @@ static inline void npy_csetimagf(npy_cfloat *z, const float i) static inline npy_longdouble npy_creall(const npy_clongdouble z) { #if defined(__cplusplus) - return ((longdouble_t *) &z)[0]; + return (npy_longdouble)z._Val[0]; #else return creall(z); #endif @@ -433,7 +433,7 @@ static inline void npy_csetreall(npy_clongdouble *z, const longdouble_t r) static inline npy_longdouble npy_cimagl(const npy_clongdouble z) { #if defined(__cplusplus) - return ((longdouble_t *) &z)[1]; + return (npy_longdouble)z._Val[1]; #else return cimagl(z); #endif From bd1c863f4a17ef29a2b16f64af0b9eb4c4d2edde Mon Sep 17 00:00:00 2001 From: Peter Hawkins Date: Tue, 6 May 2025 16:15:10 -0400 Subject: [PATCH 183/187] BUG: Fix missing check for PyErr_Occurred() in _pyarray_correlate. (#28898) When running the scipy 1.15 test suite test signal/tests/test_signaltools.py::test_lfilter_bad_object, with Python built in debug mode, we see the following error: ``` Fatal Python error: _Py_CheckSlotResult: Slot * of type float succeeded with an exception set ``` `None` ends up as the first argument to `dot`, and this triggers an error from PyFloat_Multiply. Once an error has occurred, we must avoid calling multiply again, since it asserts that PyErr_Occurred() is false if the output is a non-error, which will fail if an error was set at entry. 
--- numpy/_core/src/multiarray/multiarraymodule.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index d337a84e9baf..2d28a7a9ef4b 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -1215,6 +1215,7 @@ _pyarray_correlate(PyArrayObject *ap1, PyArrayObject *ap2, int typenum, goto clean_ret; } + int needs_pyapi = PyDataType_FLAGCHK(PyArray_DESCR(ret), NPY_NEEDS_PYAPI); NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(ret)); is1 = PyArray_STRIDES(ap1)[0]; is2 = PyArray_STRIDES(ap2)[0]; @@ -1225,6 +1226,9 @@ _pyarray_correlate(PyArrayObject *ap1, PyArrayObject *ap2, int typenum, n = n - n_left; for (i = 0; i < n_left; i++) { dot(ip1, is1, ip2, is2, op, n, ret); + if (needs_pyapi && PyErr_Occurred()) { + goto done; + } n++; ip2 -= is2; op += os; @@ -1236,19 +1240,21 @@ _pyarray_correlate(PyArrayObject *ap1, PyArrayObject *ap2, int typenum, op += os * (n1 - n2 + 1); } else { - for (i = 0; i < (n1 - n2 + 1); i++) { + for (i = 0; i < (n1 - n2 + 1) && (!needs_pyapi || !PyErr_Occurred()); + i++) { dot(ip1, is1, ip2, is2, op, n, ret); ip1 += is1; op += os; } } - for (i = 0; i < n_right; i++) { + for (i = 0; i < n_right && (!needs_pyapi || !PyErr_Occurred()); i++) { n--; dot(ip1, is1, ip2, is2, op, n, ret); ip1 += is1; op += os; } +done: NPY_END_THREADS_DESCR(PyArray_DESCR(ret)); if (PyErr_Occurred()) { goto clean_ret; From 5bad9dabb3b875922ffe74e6909986cf89a46a7c Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 14 May 2025 14:43:00 +0200 Subject: [PATCH 184/187] TYP: reject complex scalar types in ``ndarray.__ifloordiv__`` --- numpy/__init__.pyi | 74 +++++++++++++++++++++------------------------- 1 file changed, 34 insertions(+), 40 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 0acb7a565987..cbd77a128ab9 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3356,6 +3356,7 
@@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __iadd__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + # @overload def __isub__( self: NDArray[unsignedinteger[Any]], @@ -3379,6 +3380,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __isub__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + # @overload def __imul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload @@ -3403,66 +3405,51 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): def __imul__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __itruediv__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __itruediv__(self: NDArray[floating[Any]], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __itruediv__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __itruediv__( - self: NDArray[complexfloating[Any]], - other: _ArrayLikeComplex_co, + def __ipow__( + self: NDArray[unsignedinteger[Any]], + other: _ArrayLikeUInt_co | _IntLike_co, /, ) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeInt, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ipow__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __itruediv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... - + def __ipow__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ifloordiv__( - self: NDArray[unsignedinteger[Any]], - other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DType_co]: ... 
+ def __ipow__(self: NDArray[floating[Any]], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ifloordiv__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ipow__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ifloordiv__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ipow__(self: NDArray[complexfloating[Any]], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload - def __ifloordiv__(self: NDArray[floating[Any]], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ipow__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + + # @overload - def __ifloordiv__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __itruediv__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __ifloordiv__( - self: NDArray[complexfloating[Any]], - other: _ArrayLikeComplex_co, - /, - ) -> ndarray[_ShapeT_co, _DType_co]: ... + def __itruediv__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeInt, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeInt, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __ifloordiv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __itruediv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
+ # keep in sync with `__imod__` @overload - def __ipow__( - self: NDArray[unsignedinteger[Any]], + def __ifloordiv__( + self: NDArray[unsignedinteger], other: _ArrayLikeUInt_co | _IntLike_co, - /, - ) -> ndarray[_ShapeT_co, _DType_co]: ... + / + ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __ipow__(self: NDArray[signedinteger[Any]], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __ipow__(self: NDArray[float64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... - @overload - def __ipow__(self: NDArray[floating[Any]], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ifloordiv__(self: NDArray[signedinteger], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __ipow__(self: NDArray[complex128], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ifloordiv__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __ipow__(self: NDArray[complexfloating[Any]], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeInt, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... @overload - def __ipow__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + def __ifloordiv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + # keep in sync with `__ifloordiv__` @overload def __imod__( self: NDArray[unsignedinteger[Any]], @@ -3484,6 +3471,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __imod__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... 
+ # keep in sync with `__irshift__` @overload def __ilshift__( self: NDArray[unsignedinteger[Any]], @@ -3495,6 +3483,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __ilshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + # keep in sync with `__ilshift__` @overload def __irshift__( self: NDArray[unsignedinteger[Any]], @@ -3506,6 +3495,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __irshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + # keep in sync with `__ixor__` and `__ior__` @overload def __iand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload @@ -3519,6 +3509,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __iand__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + # keep in sync with `__iand__` and `__ior__` @overload def __ixor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload @@ -3532,6 +3523,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __ixor__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + # keep in sync with `__iand__` and `__ixor__` @overload def __ior__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... @overload @@ -3545,6 +3537,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __ior__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + # @overload def __imatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DType_co]: ... 
@overload @@ -3562,6 +3555,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DType_co]): @overload def __imatmul__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DType_co]: ... + # def __dlpack__( self: NDArray[number[Any]], /, From 248f0cba3bd4081bde896009b1f10e975a608eaa Mon Sep 17 00:00:00 2001 From: jorenham Date: Wed, 14 May 2025 19:49:14 +0200 Subject: [PATCH 185/187] TYP: add rejection-tests for complex ``ndarray`` floordiv --- numpy/typing/tests/data/fail/arithmetic.pyi | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/numpy/typing/tests/data/fail/arithmetic.pyi b/numpy/typing/tests/data/fail/arithmetic.pyi index 3d250c493cfb..29f3ab4e28d3 100644 --- a/numpy/typing/tests/data/fail/arithmetic.pyi +++ b/numpy/typing/tests/data/fail/arithmetic.pyi @@ -72,6 +72,11 @@ AR_i // AR_LIKE_m # E: Unsupported operand types AR_f // AR_LIKE_m # E: Unsupported operand types AR_c // AR_LIKE_m # E: Unsupported operand types +# regression tests for https://github.com/numpy/numpy/issues/28957 +AR_c // 2 # type: ignore[operator] +AR_c // AR_i # type: ignore[operator] +AR_c // AR_c # type: ignore[operator] + # Array multiplication AR_b *= AR_LIKE_u # E: incompatible type From ed418286bf91434d6f8af7133645de7b4ea3f807 Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Thu, 15 May 2025 10:29:21 -0600 Subject: [PATCH 186/187] REL: Prepare for the NumPy 2.2.6 release [wheel build] - Create 2.2.6-changelog.rst. - Update 2.2.6-notes.rst. 
--- doc/changelog/2.2.6-changelog.rst | 32 ++++++++++++++++++++++++++++++ doc/source/release/2.2.6-notes.rst | 32 +++++++++++++++++++++++++++++- 2 files changed, 63 insertions(+), 1 deletion(-) create mode 100644 doc/changelog/2.2.6-changelog.rst diff --git a/doc/changelog/2.2.6-changelog.rst b/doc/changelog/2.2.6-changelog.rst new file mode 100644 index 000000000000..16c62da4a927 --- /dev/null +++ b/doc/changelog/2.2.6-changelog.rst @@ -0,0 +1,32 @@ + +Contributors +============ + +A total of 8 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Ilhan Polat +* Joren Hammudoglu +* Marco Gorelli + +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Sayed Adel + +Pull requests merged +==================== + +A total of 11 pull requests were merged for this release. + +* `#28778 `__: MAINT: Prepare 2.2.x for further development +* `#28851 `__: BLD: Update vendor-meson to fix module_feature conflicts arguments... +* `#28852 `__: BUG: fix heap buffer overflow in np.strings.find +* `#28853 `__: TYP: fix ``NDArray[floating] + float`` return type +* `#28864 `__: BUG: fix stringdtype singleton thread safety +* `#28865 `__: MAINT: use OpenBLAS 0.3.29 +* `#28889 `__: MAINT: from_dlpack thread safety fixes (#28883) +* `#28913 `__: TYP: Fix non-existent ``CanIndex`` annotation in ``ndarray.setfield`` +* `#28915 `__: MAINT: Avoid dereferencing/strict aliasing warnings +* `#28916 `__: BUG: Fix missing check for PyErr_Occurred() in _pyarray_correlate. +* `#28966 `__: TYP: reject complex scalar types in ndarray.__ifloordiv__ diff --git a/doc/source/release/2.2.6-notes.rst b/doc/source/release/2.2.6-notes.rst index 572f0d976c03..974f59d640db 100644 --- a/doc/source/release/2.2.6-notes.rst +++ b/doc/source/release/2.2.6-notes.rst @@ -5,9 +5,39 @@ NumPy 2.2.6 Release Notes ========================== NumPy 2.2.6 is a patch release that fixes bugs found after the 2.2.5 release. 
-It has a large number of typing fixes/improvements as well as the normal bug +It is a mix of typing fixes/improvements as well as the normal bug fixes and some CI maintenance. This release supports Python versions 3.10-3.13. +Contributors +============ +A total of 8 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Ilhan Polat +* Joren Hammudoglu +* Marco Gorelli + +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Sayed Adel + +Pull requests merged +==================== + +A total of 11 pull requests were merged for this release. + +* `#28778 `__: MAINT: Prepare 2.2.x for further development +* `#28851 `__: BLD: Update vendor-meson to fix module_feature conflicts arguments... +* `#28852 `__: BUG: fix heap buffer overflow in np.strings.find +* `#28853 `__: TYP: fix ``NDArray[floating] + float`` return type +* `#28864 `__: BUG: fix stringdtype singleton thread safety +* `#28865 `__: MAINT: use OpenBLAS 0.3.29 +* `#28889 `__: MAINT: from_dlpack thread safety fixes +* `#28913 `__: TYP: Fix non-existent ``CanIndex`` annotation in ``ndarray.setfield`` +* `#28915 `__: MAINT: Avoid dereferencing/strict aliasing warnings +* `#28916 `__: BUG: Fix missing check for PyErr_Occurred() in _pyarray_correlate. 
+* `#28966 `__: TYP: reject complex scalar types in ndarray.__ifloordiv__ From ae76bc88d013688d89949a50304f19452b9777ed Mon Sep 17 00:00:00 2001 From: Charles Harris Date: Sat, 17 May 2025 16:53:28 -0600 Subject: [PATCH 187/187] MAINT: Prepare 2.2.x for further development - Create 2.2.7-notes.rst - Update release.rst - Update pavement.py - Update pyproject.toml [skip azp] [skip cirrus] [skip actions] --- doc/source/release.rst | 1 + doc/source/release/2.2.7-notes.rst | 25 +++++++++++++++++++++++++ pavement.py | 2 +- pyproject.toml | 2 +- 4 files changed, 28 insertions(+), 2 deletions(-) create mode 100644 doc/source/release/2.2.7-notes.rst diff --git a/doc/source/release.rst b/doc/source/release.rst index 559a89110a2b..6cef4da82790 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,7 @@ Release notes .. toctree:: :maxdepth: 2 + 2.2.7 2.2.6 2.2.5 2.2.4 diff --git a/doc/source/release/2.2.7-notes.rst b/doc/source/release/2.2.7-notes.rst new file mode 100644 index 000000000000..d312e4409353 --- /dev/null +++ b/doc/source/release/2.2.7-notes.rst @@ -0,0 +1,25 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.2.7 Release Notes +========================== + +NumPy 2.2.7 is a patch release that fixes bugs found after the 2.2.6 release. +It is a mix of typing fixes/improvements as well as the normal bug +fixes and some CI maintenance. + +This release supports Python versions 3.10-3.13. + + +Highlights +========== + +*We'll choose highlights for this release near the end of the release cycle.* + + +.. if release snippets have been incorporated already, uncomment the follow + line (leave the `.. include:: directive) + +.. **Content from release note snippets in doc/release/upcoming_changes:** + +..
include:: notes-towncrier.rst diff --git a/pavement.py b/pavement.py index fbe3071137c3..804749df9626 100644 --- a/pavement.py +++ b/pavement.py @@ -36,7 +36,7 @@ #----------------------------------- # Path to the release notes -RELEASE_NOTES = 'doc/source/release/2.2.6-notes.rst' +RELEASE_NOTES = 'doc/source/release/2.2.7-notes.rst' #------------------------------------------------------- diff --git a/pyproject.toml b/pyproject.toml index c4d3e99cd77e..2bfedb9c8b20 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.2.6" +version = "2.2.7" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"}